1818#include " src/__support/threads/mutex.h" // Mutex
1919#include " src/__support/threads/raw_mutex.h" // RawMutex
2020#include " src/__support/threads/sleep.h"
21+ #include " src/__support/time/abs_timeout.h"
2122
2223#ifdef LIBC_COPT_TIMEOUT_ENSURE_MONOTONICITY
2324#include " src/__support/time/monotonicity.h"
@@ -85,57 +86,53 @@ class CndVar {
8586 Requeued = 3 ,
8687 };
8788
88- struct QueueNode {
89- QueueNode *prev;
90- QueueNode *next;
89+ template <typename T> struct QueueNode {
90+ T *prev;
91+ T *next;
92+
93+ LIBC_INLINE T *self () { return static_cast <T *>(this ); }
9194
9295 // We use cyclic dummy node to avoid handing corner cases.
9396 LIBC_INLINE void ensure_queue_initialization () {
9497 if (LIBC_UNLIKELY (prev == nullptr ))
95- prev = next = this ;
98+ prev = next = self () ;
9699 }
97100
98101 // Assume `this` the dummy node of queue. Push back `waiter` to the queue.
99- LIBC_INLINE void push_back (QueueNode *waiter) {
102+ LIBC_INLINE void push_back (T *waiter) {
100103 ensure_queue_initialization ();
101- waiter->next = this ;
104+ waiter->next = self () ;
102105 waiter->prev = prev;
103106 waiter->next ->prev = waiter;
104107 waiter->prev ->next = waiter;
105108 }
106109
107110 // Remove `waiter` from the queue.
108- LIBC_INLINE static void remove (QueueNode *waiter) {
111+ LIBC_INLINE static void remove (T *waiter) {
109112 waiter->next ->prev = waiter->prev ;
110113 waiter->prev ->next = waiter->next ;
111114 waiter->prev = waiter->next = waiter;
112115 }
113116
114- // Assume `this` the dummy node of queue. Pop the first waiter from the
115- // queue.
116- LIBC_INLINE QueueNode *pop_front () {
117+ LIBC_INLINE bool is_empty () {
117118 ensure_queue_initialization ();
118- if (next == this )
119- return nullptr ;
120- QueueNode *first = next;
121- remove (first);
122- return first;
119+ return self () == next;
123120 }
124121
125122 // Assume `this` is the dummy node of the queue. Separate nodes before
126123 // cursor into a separate queue.
127- LIBC_INLINE void separate (QueueNode *cursor) {
128- QueueNode *removed_head = this ->next ;
129- QueueNode *removed_tail = cursor->prev ;
124+ LIBC_INLINE void separate (T *cursor) {
125+ T *removed_head = this ->next ;
126+ T *removed_tail = cursor->prev ;
130127 this ->next = cursor;
131- cursor->prev = this ;
128+ cursor->prev = self () ;
132129 removed_tail->next = removed_head;
133130 removed_head->prev = removed_tail;
134131 }
135132 };
136133
137134 // This node will be on the per-thread stack.
138- struct CndWaiter : QueueNode {
135+ struct CndWaiter : QueueNode<CndWaiter> {
139136 cpp::Atomic<CancellationBarrier *> cancellation_barrier;
140137 RawMutex barrier;
141138 cpp::Atomic<uint8_t > state;
@@ -158,7 +155,7 @@ class CndVar {
158155 // save trailing padding bytes, such that is_shared
159156 // can be introduced without extra space.
160157 union {
161- QueueNode waiter_queue;
158+ QueueNode<CndWaiter> waiter_queue;
162159 cpp::Atomic<size_t > shared_waiters;
163160 };
164161
@@ -196,14 +193,10 @@ class CndVar {
196193 // acquire the lock and dequeue themselves.
197194 {
198195 cpp::lock_guard lock (queue_lock);
199- // Still need to check the queue validity. CndVar maybe
200- // intialized by an empty intializer.
201- waiter_queue.ensure_queue_initialization ();
202- if (waiter_queue.next == &waiter_queue)
196+ if (waiter_queue.is_empty ())
203197 return ;
204- for (cursor = static_cast <CndWaiter *>(waiter_queue.next );
205- cursor != &waiter_queue;
206- cursor = static_cast <CndWaiter *>(cursor->next )) {
198+ for (cursor = waiter_queue.next ; cursor != waiter_queue.self ();
199+ cursor = cursor->next ) {
207200 if (limit == 0 )
208201 break ;
209202 uint8_t expected = Waiting;
@@ -294,7 +287,7 @@ class CndVar {
294287 // we haven't consumed the signal before timeout reaches.
295288 {
296289 cpp::lock_guard lock (queue_lock);
297- QueueNode ::remove (&waiter);
290+ CndWaiter ::remove (&waiter);
298291 }
299292 waiter.confirm_cancellation ();
300293 } else if (!locked) {
@@ -307,7 +300,7 @@ class CndVar {
307300
308301 // Reacquire the mutex lock. If error ever happens, we still wake up
309302 // our successor so that remaining waiters can continue. However, we treat
310- // outselves as not owning the mute and we don't touch the contention
310303 // ourselves as not owning the mutex and we don't touch the contention
311304 // bit.
312305 MutexError mutex_result = mutex->lock ();
313306 // If we are requeued, we need to establish contention after lock, otherwise
@@ -319,8 +312,8 @@ class CndVar {
319312 // If there is other in the queue after us, we need to wake the next waiter.
320313 // If we cancelled, we should naturally have waiter.next == &waiter
321314 if (waiter.next != &waiter) {
322- auto *next_waiter = static_cast <CndWaiter *>( waiter.next ) ;
323- QueueNode ::remove (&waiter);
315+ auto *next_waiter = waiter.next ;
316+ CndWaiter ::remove (&waiter);
324317 auto &next_barrier_futex = next_waiter->barrier .get_raw_futex ();
325318 auto &mutex_futex = mutex->get_raw_futex ();
326319 // the following is basically an inlined version of mutex::unlock
0 commit comments