tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

waitable_event_posix.cc (12910B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
      3 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
      4 // Use of this source code is governed by a BSD-style license that can be
      5 // found in the LICENSE file.
      6 
      7 #include "base/waitable_event.h"
      8 
      9 #include "base/condition_variable.h"
     10 #include "base/lock.h"
     11 #include "base/message_loop.h"
     12 
     13 // -----------------------------------------------------------------------------
     14 // A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
     15 // support cross-process events (where one process can signal an event which
     16 // others are waiting on). Because of this, we can avoid having one thread per
     17 // listener in several cases.
     18 //
     19 // The WaitableEvent maintains a list of waiters, protected by a lock. Each
     20 // waiter is either an async wait, in which case we have a Task and the
     21 // MessageLoop to run it on, or a blocking wait, in which case we have the
     22 // condition variable to signal.
     23 //
     24 // Waiting involves grabbing the lock and adding oneself to the wait list. Async
     25 // waits can be canceled, which means grabbing the lock and removing oneself
     26 // from the list.
     27 //
     28 // Waiting on multiple events is handled by adding a single, synchronous wait to
     29 // the wait-list of many events. An event passes a pointer to itself when
     30 // firing a waiter and so we can store that pointer to find out which event
     31 // triggered.
     32 // -----------------------------------------------------------------------------
     33 
     34 namespace base {
     35 
// -----------------------------------------------------------------------------
// WaitableEvent implementation. (The abstract Waiter base class that both
// kinds of waiter derive from is declared in the header, not here.)
// -----------------------------------------------------------------------------
// Constructs the event. All mutable state (lock, signaled flag, wait-list)
// lives in the heap-allocated WaitableEventKernel.
// |manual_reset|: if true, Signal() wakes every waiter and the event stays
// signaled until Reset(); if false, Signal() wakes at most one waiter and
// the event resets itself.
// |initially_signaled|: starting value of the signaled flag.
WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled)
   : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) {}
     41 
     42 WaitableEvent::~WaitableEvent() {}
     43 
     44 void WaitableEvent::Reset() {
     45  AutoLock locked(kernel_->lock_);
     46  kernel_->signaled_ = false;
     47 }
     48 
     49 void WaitableEvent::Signal() {
     50  AutoLock locked(kernel_->lock_);
     51 
     52  if (kernel_->signaled_) return;
     53 
     54  if (kernel_->manual_reset_) {
     55    SignalAll();
     56    kernel_->signaled_ = true;
     57  } else {
     58    // In the case of auto reset, if no waiters were woken, we remain
     59    // signaled.
     60    if (!SignalOne()) kernel_->signaled_ = true;
     61  }
     62 }
     63 
     64 bool WaitableEvent::IsSignaled() {
     65  AutoLock locked(kernel_->lock_);
     66 
     67  const bool result = kernel_->signaled_;
     68  if (result && !kernel_->manual_reset_) kernel_->signaled_ = false;
     69  return result;
     70 }
     71 
     72 // -----------------------------------------------------------------------------
     73 // Synchronous waits
     74 
// -----------------------------------------------------------------------------
// This is a synchronous waiter. The thread is waiting on the given condition
// variable and the fired flag in this object.
// -----------------------------------------------------------------------------
     79 class SyncWaiter : public WaitableEvent::Waiter {
     80 public:
     81  SyncWaiter(ConditionVariable* cv, Lock* lock)
     82      : fired_(false), cv_(cv), lock_(lock), signaling_event_(NULL) {}
     83 
     84  bool Fire(WaitableEvent* signaling_event) override {
     85    AutoLock locked(*lock_);
     86 
     87    if (fired_) {
     88      return false;
     89    }
     90 
     91    fired_ = true;
     92    signaling_event_ = signaling_event;
     93 
     94    cv_->Broadcast();
     95 
     96    // SyncWaiters are stack allocated on the stack of the blocking thread.
     97    return true;
     98  }
     99 
    100  WaitableEvent* signaled_event() const { return signaling_event_; }
    101 
    102  // ---------------------------------------------------------------------------
    103  // These waiters are always stack allocated and don't delete themselves. Thus
    104  // there's no problem and the ABA tag is the same as the object pointer.
    105  // ---------------------------------------------------------------------------
    106  bool Compare(void* tag) override { return this == tag; }
    107 
    108  // ---------------------------------------------------------------------------
    109  // Called with lock held.
    110  // ---------------------------------------------------------------------------
    111  bool fired() const { return fired_; }
    112 
    113  // ---------------------------------------------------------------------------
    114  // During a TimedWait, we need a way to make sure that an auto-reset
    115  // WaitableEvent doesn't think that this event has been signaled between
    116  // unlocking it and removing it from the wait-list. Called with lock held.
    117  // ---------------------------------------------------------------------------
    118  void Disable() { fired_ = true; }
    119 
    120 private:
    121  bool fired_;
    122  ConditionVariable* const cv_;
    123  Lock* const lock_;
    124  WaitableEvent* signaling_event_;  // The WaitableEvent which woke us
    125 };
    126 
// Blocks the calling thread until this event is signaled or |max_time| has
// elapsed. A negative |max_time| means "wait forever" (see Wait()). Returns
// true if the event was signaled, false on timeout. For an auto-reset event
// a successful wait consumes the signal.
bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
 const TimeTicks end_time(TimeTicks::Now() + max_time);
 // A negative internal value encodes an infinite wait.
 const bool finite_time = max_time.ToInternalValue() >= 0;

 kernel_->lock_.Acquire();
 if (kernel_->signaled_) {
   if (!kernel_->manual_reset_) {
     // In this case we were signaled when we had no waiters. Now that
     // someone has waited upon us, we can automatically reset.
     kernel_->signaled_ = false;
   }

   kernel_->lock_.Release();
   return true;
 }

 // Not yet signaled: build a stack-allocated SyncWaiter guarded by its own
 // lock/condition-variable pair and put it on this event's wait-list.
 Lock lock;
 lock.Acquire();
 ConditionVariable cv(&lock);
 SyncWaiter sw(&cv, &lock);

 Enqueue(&sw);
 kernel_->lock_.Release();
 // We are violating locking order here by holding the SyncWaiter lock but not
 // the WaitableEvent lock. However, this is safe because we don't lock @lock_
 // again before unlocking it.

 for (;;) {
   const TimeTicks current_time(TimeTicks::Now());

   if (sw.fired() || (finite_time && current_time >= end_time)) {
     const bool return_value = sw.fired();

     // We can't acquire @lock_ before releasing @lock (because of locking
     // order), however, inbetween the two a signal could be fired and @sw
     // would accept it, however we will still return false, so the signal
     // would be lost on an auto-reset WaitableEvent. Thus we call Disable
     // which makes sw::Fire return false.
     sw.Disable();
     lock.Release();

     kernel_->lock_.Acquire();
     // The tag equals the waiter pointer because SyncWaiters are stack
     // allocated (no ABA problem); see SyncWaiter::Compare.
     kernel_->Dequeue(&sw, &sw);
     kernel_->lock_.Release();

     return return_value;
   }

   // Sleep until fired or (for finite waits) until the deadline; the
   // enclosing loop re-checks the predicate after every wakeup.
   if (finite_time) {
     const TimeDelta max_wait(end_time - current_time);
     cv.TimedWait(max_wait);
   } else {
     cv.Wait();
   }
 }
}
    183 
    184 bool WaitableEvent::Wait() { return TimedWait(TimeDelta::FromSeconds(-1)); }
    185 
    186 // -----------------------------------------------------------------------------
    187 
    188 // -----------------------------------------------------------------------------
    189 // Synchronous waiting on multiple objects.
    190 
    191 static bool  // StrictWeakOrdering
    192 cmp_fst_addr(const std::pair<WaitableEvent*, unsigned>& a,
    193             const std::pair<WaitableEvent*, unsigned>& b) {
    194  return a.first < b.first;
    195 }
    196 
    197 // static
// Blocks until one of the |count| distinct events in |raw_waitables| becomes
// signaled, and returns the index (into |raw_waitables|) of the event that
// fired. For an auto-reset event, being chosen here consumes its signal.
size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, size_t count) {
 // NOTE(review): if count == 0 reaches a release build (DCHECK compiled
 // out), the loop bound |waitables.size() - 1| below underflows.
 DCHECK(count) << "Cannot wait on no events";

 // We need to acquire the locks in a globally consistent order. Thus we sort
 // the array of waitables by address. We actually sort pairs so that we can
 // map back to the original index values later.
 std::vector<std::pair<WaitableEvent*, size_t> > waitables;
 waitables.reserve(count);
 for (size_t i = 0; i < count; ++i)
   waitables.push_back(std::make_pair(raw_waitables[i], i));

 DCHECK_EQ(count, waitables.size());

 sort(waitables.begin(), waitables.end(), cmp_fst_addr);

 // The set of waitables must be distinct. Since we have just sorted by
 // address, we can check this cheaply by comparing pairs of consecutive
 // elements.
 for (size_t i = 0; i < waitables.size() - 1; ++i) {
   DCHECK(waitables[i].first != waitables[i + 1].first);
 }

 Lock lock;
 ConditionVariable cv(&lock);
 SyncWaiter sw(&cv, &lock);

 // EnqueueMany either enqueues |sw| on every event and returns 0 (leaving
 // every event lock held), or finds an already-signaled event, enqueues
 // nothing, and returns the count remaining at that point.
 const size_t r = EnqueueMany(&waitables[0], count, &sw);
 if (r) {
   // One of the events is already signaled. The SyncWaiter has not been
   // enqueued anywhere. EnqueueMany returns the count of remaining waitables
   // when the signaled one was seen, so the index of the signaled event is
   // @count - @r.
   return waitables[count - r].second;
 }

 // At this point, we hold the locks on all the WaitableEvents and we have
 // enqueued our waiter in them all.
 lock.Acquire();
 // Release the WaitableEvent locks in the reverse order
 for (size_t i = 0; i < count; ++i) {
   waitables[count - (1 + i)].first->kernel_->lock_.Release();
 }

 // Sleep until some event fires |sw|; the loop re-checks the predicate after
 // every condition-variable wakeup.
 for (;;) {
   if (sw.fired()) break;

   cv.Wait();
 }
 lock.Release();

 // The address of the WaitableEvent which fired is stored in the SyncWaiter.
 WaitableEvent* const signaled_event = sw.signaled_event();
 // This will store the index of the raw_waitables which fired.
 size_t signaled_index = 0;

 // Take the locks of each WaitableEvent in turn (except the signaled one) and
 // remove our SyncWaiter from the wait-list. (The signaled event already
 // removed |sw| when it fired it — see SignalOne/SignalAll.)
 for (size_t i = 0; i < count; ++i) {
   if (raw_waitables[i] != signaled_event) {
     raw_waitables[i]->kernel_->lock_.Acquire();
     // There's no possible ABA issue with the address of the SyncWaiter here
     // because it lives on the stack. Thus the tag value is just the pointer
     // value again.
     raw_waitables[i]->kernel_->Dequeue(&sw, &sw);
     raw_waitables[i]->kernel_->lock_.Release();
   } else {
     signaled_index = i;
   }
 }

 return signaled_index;
}
    270 
    271 // -----------------------------------------------------------------------------
    272 // If return value == 0:
    273 //   The locks of the WaitableEvents have been taken in order and the Waiter has
    274 //   been enqueued in the wait-list of each. None of the WaitableEvents are
    275 //   currently signaled
    276 // else:
    277 //   None of the WaitableEvent locks are held. The Waiter has not been enqueued
    278 //   in any of them and the return value is the index of the first WaitableEvent
    279 //   which was signaled, from the end of the array.
    280 // -----------------------------------------------------------------------------
    281 // static
size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
                                 size_t count, Waiter* waiter) {
 // Base case: every event was unsignaled and |waiter| was enqueued on all.
 if (!count) return 0;

 // Lock the first event. Callers pre-sorted the array by address, so the
 // recursion acquires the locks in the globally consistent order.
 waitables[0].first->kernel_->lock_.Acquire();
 if (waitables[0].first->kernel_->signaled_) {
   // This event is already signaled: consume the signal if auto-reset,
   // release our lock and unwind. The non-zero return both aborts the
   // recursion and encodes which event fired (see header comment above).
   if (!waitables[0].first->kernel_->manual_reset_)
     waitables[0].first->kernel_->signaled_ = false;
   waitables[0].first->kernel_->lock_.Release();
   return count;
 }

 // Recurse over the remaining events while still holding this event's lock.
 const size_t r = EnqueueMany(waitables + 1, count - 1, waiter);
 if (r) {
   // A later event was signaled: release our lock as the recursion unwinds,
   // restoring the "no locks held" postcondition.
   waitables[0].first->kernel_->lock_.Release();
 } else {
   // No event was signaled: enqueue |waiter| here and keep the lock held;
   // WaitMany releases all the event locks itself.
   waitables[0].first->Enqueue(waiter);
 }

 return r;
}
    303 
    304 // -----------------------------------------------------------------------------
    305 
    306 // -----------------------------------------------------------------------------
    307 // Private functions...
    308 
    309 // -----------------------------------------------------------------------------
    310 // Wake all waiting waiters. Called with lock held.
    311 // -----------------------------------------------------------------------------
    312 bool WaitableEvent::SignalAll() {
    313  bool signaled_at_least_one = false;
    314 
    315  for (std::list<Waiter*>::iterator i = kernel_->waiters_.begin();
    316       i != kernel_->waiters_.end(); ++i) {
    317    if ((*i)->Fire(this)) signaled_at_least_one = true;
    318  }
    319 
    320  kernel_->waiters_.clear();
    321  return signaled_at_least_one;
    322 }
    323 
    324 // ---------------------------------------------------------------------------
    325 // Try to wake a single waiter. Return true if one was woken. Called with lock
    326 // held.
    327 // ---------------------------------------------------------------------------
    328 bool WaitableEvent::SignalOne() {
    329  for (;;) {
    330    if (kernel_->waiters_.empty()) return false;
    331 
    332    const bool r = (*kernel_->waiters_.begin())->Fire(this);
    333    kernel_->waiters_.pop_front();
    334    if (r) return true;
    335  }
    336 }
    337 
    338 // -----------------------------------------------------------------------------
    339 // Add a waiter to the list of those waiting. Called with lock held.
    340 // -----------------------------------------------------------------------------
void WaitableEvent::Enqueue(Waiter* waiter) {
 // push_back keeps FIFO order: SignalOne pops from the front, so waiters are
 // woken in the order they began waiting.
 kernel_->waiters_.push_back(waiter);
}
    344 
    345 // -----------------------------------------------------------------------------
    346 // Remove a waiter from the list of those waiting. Return true if the waiter was
    347 // actually removed. Called with lock held.
    348 // -----------------------------------------------------------------------------
    349 bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) {
    350  for (std::list<Waiter*>::iterator i = waiters_.begin(); i != waiters_.end();
    351       ++i) {
    352    if (*i == waiter && (*i)->Compare(tag)) {
    353      waiters_.erase(i);
    354      return true;
    355    }
    356  }
    357 
    358  return false;
    359 }
    360 
    361 // -----------------------------------------------------------------------------
    362 
    363 }  // namespace base