Thanks to visit codestin.com
Credit goes to doxygen.postgresql.org

PostgreSQL Source Code git master
atomics.h
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * atomics.h
4 * Atomic operations.
5 *
6 * Hardware and compiler dependent functions for manipulating memory
7 * atomically and dealing with cache coherency. Used to implement locking
8 * facilities and lockless algorithms/data structures.
9 *
10 * To bring up postgres on a platform/compiler at the very least
11 * implementations for the following operations should be provided:
12 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
13 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
14 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
15 * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
16 *
17 * There exist generic, hardware independent, implementations for several
18 * compilers which might be sufficient, although possibly not optimal, for a
19 * new platform. If no such generic implementation is available spinlocks will
20 * be used to implement the 64-bit parts of the API.
21 *
22 * Implement _u64 atomics if and only if your platform can use them
23 * efficiently (and obviously correctly).
24 *
25 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
26 * whenever possible. Writing correct code using these facilities is hard.
27 *
28 * For an introduction to using memory barriers within the PostgreSQL backend,
29 * see src/backend/storage/lmgr/README.barrier
30 *
31 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
32 * Portions Copyright (c) 1994, Regents of the University of California
33 *
34 * src/include/port/atomics.h
35 *
36 *-------------------------------------------------------------------------
37 */
38#ifndef ATOMICS_H
39#define ATOMICS_H
40
41#ifdef FRONTEND
42#error "atomics.h may not be included from frontend code"
43#endif
44
45#define INSIDE_ATOMICS_H
46
47#include <limits.h>
48
49/*
50 * First a set of architecture specific files is included.
51 *
52 * These files can provide the full set of atomics or can do pretty much
53 * nothing if all the compilers commonly used on these platforms provide
54 * usable generics.
55 *
56 * Don't add an inline assembly of the actual atomic operations if all the
57 * common implementations of your platform provide intrinsics. Intrinsics are
58 * much easier to understand and potentially support more architectures.
59 *
60 * It will often make sense to define memory barrier semantics here, since
61 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
62 * postgres doesn't need x86 read/write barriers to do anything more than a
63 * compiler barrier.
64 *
65 */
66#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
68#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
70#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
72#endif
73
74/*
75 * Compiler specific, but architecture independent implementations.
76 *
77 * Provide architecture independent implementations of the atomic
78 * facilities. At the very least compiler barriers should be provided, but a
79 * full implementation of
80 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
81 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
82 * using compiler intrinsics are a good idea.
83 */
84/*
85 * gcc or compatible, including clang and icc.
86 */
87#if defined(__GNUC__) || defined(__INTEL_COMPILER)
89#elif defined(_MSC_VER)
91#else
92/* Unknown compiler. */
93#endif
94
95/* Fail if we couldn't find implementations of required facilities. */
96#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
97#error "could not find an implementation of pg_atomic_uint32"
98#endif
99#if !defined(pg_compiler_barrier_impl)
100#error "could not find an implementation of pg_compiler_barrier"
101#endif
102#if !defined(pg_memory_barrier_impl)
103#error "could not find an implementation of pg_memory_barrier_impl"
104#endif
105
106
107/*
108 * Provide a spinlock-based implementation of the 64 bit variants, if
109 * necessary.
110 */
112
113/*
114 * Provide additional operations using supported infrastructure. These are
115 * expected to be efficient if the underlying atomic operations are efficient.
116 */
117#include "port/atomics/generic.h"
118
119
/*
 * pg_compiler_barrier - prevent the compiler from moving code across
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier.  However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier()	pg_compiler_barrier_impl()

/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier.  Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instruction.
 */
#define pg_memory_barrier()	pg_memory_barrier_impl()

/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier.  Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores.  Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier.  In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier()	pg_read_barrier_impl()
#define pg_write_barrier()	pg_write_barrier_impl()

/*
 * Spinloop delay - Allow CPU to relax in busy loops
 */
#define pg_spin_delay()	pg_spin_delay_impl()
161
162/*
163 * pg_atomic_init_flag - initialize atomic flag.
164 *
165 * No barrier semantics.
166 */
167static inline void
168pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
169{
170 pg_atomic_init_flag_impl(ptr);
171}
172
173/*
174 * pg_atomic_test_set_flag - TAS()
175 *
176 * Returns true if the flag has successfully been set, false otherwise.
177 *
178 * Acquire (including read barrier) semantics.
179 */
180static inline bool
181pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
182{
183 return pg_atomic_test_set_flag_impl(ptr);
184}
185
186/*
187 * pg_atomic_unlocked_test_flag - Check if the lock is free
188 *
189 * Returns true if the flag currently is not set, false otherwise.
190 *
191 * No barrier semantics.
192 */
193static inline bool
194pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
195{
196 return pg_atomic_unlocked_test_flag_impl(ptr);
197}
198
199/*
200 * pg_atomic_clear_flag - release lock set by TAS()
201 *
202 * Release (including write barrier) semantics.
203 */
204static inline void
205pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
206{
207 pg_atomic_clear_flag_impl(ptr);
208}
209
210
211/*
212 * pg_atomic_init_u32 - initialize atomic variable
213 *
214 * Has to be done before any concurrent usage..
215 *
216 * No barrier semantics.
217 */
218static inline void
220{
222
224}
225
226/*
227 * pg_atomic_read_u32 - unlocked read from atomic variable.
228 *
229 * The read is guaranteed to return a value as it has been written by this or
230 * another process at some point in the past. There's however no cache
231 * coherency interaction guaranteeing the value hasn't since been written to
232 * again.
233 *
234 * No barrier semantics.
235 */
236static inline uint32
238{
240 return pg_atomic_read_u32_impl(ptr);
241}
242
243/*
244 * pg_atomic_read_membarrier_u32 - read with barrier semantics.
245 *
246 * This read is guaranteed to return the current value, provided that the value
247 * is only ever updated via operations with barrier semantics, such as
248 * pg_atomic_compare_exchange_u32() and pg_atomic_write_membarrier_u32().
249 * While this may be less performant than pg_atomic_read_u32(), it may be
250 * easier to reason about correctness with this function in less performance-
251 * sensitive code.
252 *
253 * Full barrier semantics.
254 */
255static inline uint32
257{
259
260 return pg_atomic_read_membarrier_u32_impl(ptr);
261}
262
263/*
264 * pg_atomic_write_u32 - write to atomic variable.
265 *
266 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
267 * observe a partial write for any reader. Note that this correctly interacts
268 * with pg_atomic_compare_exchange_u32, in contrast to
269 * pg_atomic_unlocked_write_u32().
270 *
271 * No barrier semantics.
272 */
273static inline void
275{
277
279}
280
281/*
282 * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
283 *
284 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
285 * observe a partial write for any reader. But note that writing this way is
286 * not guaranteed to correctly interact with read-modify-write operations like
287 * pg_atomic_compare_exchange_u32. This should only be used in cases where
288 * minor performance regressions due to atomics emulation are unacceptable.
289 *
290 * No barrier semantics.
291 */
292static inline void
294{
296
298}
299
300/*
301 * pg_atomic_write_membarrier_u32 - write with barrier semantics.
302 *
303 * The write is guaranteed to succeed as a whole, i.e., it's not possible to
304 * observe a partial write for any reader. Note that this correctly interacts
305 * with both pg_atomic_compare_exchange_u32() and
306 * pg_atomic_read_membarrier_u32(). While this may be less performant than
307 * pg_atomic_write_u32(), it may be easier to reason about correctness with
308 * this function in less performance-sensitive code.
309 *
310 * Full barrier semantics.
311 */
312static inline void
314{
316
317 pg_atomic_write_membarrier_u32_impl(ptr, val);
318}
319
320/*
321 * pg_atomic_exchange_u32 - exchange newval with current value
322 *
323 * Returns the old value of 'ptr' before the swap.
324 *
325 * Full barrier semantics.
326 */
327static inline uint32
329{
331
333}
334
335/*
336 * pg_atomic_compare_exchange_u32 - CAS operation
337 *
338 * Atomically compare the current value of ptr with *expected and store newval
339 * iff ptr and *expected have the same value. The current value of *ptr will
340 * always be stored in *expected.
341 *
342 * Return true if values have been exchanged, false otherwise.
343 *
344 * Full barrier semantics.
345 */
346static inline bool
348 uint32 *expected, uint32 newval)
349{
351 AssertPointerAlignment(expected, 4);
352
353 return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
354}
355
356/*
357 * pg_atomic_fetch_add_u32 - atomically add to variable
358 *
359 * Returns the value of ptr before the arithmetic operation.
360 *
361 * Full barrier semantics.
362 */
363static inline uint32
365{
367 return pg_atomic_fetch_add_u32_impl(ptr, add_);
368}
369
370/*
371 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
372 *
373 * Returns the value of ptr before the arithmetic operation. Note that sub_
374 * may not be INT_MIN due to platform limitations.
375 *
376 * Full barrier semantics.
377 */
378static inline uint32
380{
382 Assert(sub_ != INT_MIN);
383 return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
384}
385
386/*
387 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
388 *
389 * Returns the value of ptr before the arithmetic operation.
390 *
391 * Full barrier semantics.
392 */
393static inline uint32
395{
397 return pg_atomic_fetch_and_u32_impl(ptr, and_);
398}
399
400/*
401 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
402 *
403 * Returns the value of ptr before the arithmetic operation.
404 *
405 * Full barrier semantics.
406 */
407static inline uint32
409{
411 return pg_atomic_fetch_or_u32_impl(ptr, or_);
412}
413
414/*
415 * pg_atomic_add_fetch_u32 - atomically add to variable
416 *
417 * Returns the value of ptr after the arithmetic operation.
418 *
419 * Full barrier semantics.
420 */
421static inline uint32
423{
425 return pg_atomic_add_fetch_u32_impl(ptr, add_);
426}
427
428/*
429 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
430 *
431 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
432 * not be INT_MIN due to platform limitations.
433 *
434 * Full barrier semantics.
435 */
436static inline uint32
438{
440 Assert(sub_ != INT_MIN);
441 return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
442}
443
444/* ----
445 * The 64 bit operations have the same semantics as their 32bit counterparts
446 * if they are available. Check the corresponding 32bit function for
447 * documentation.
448 * ----
449 */
450static inline void
452{
453 /*
454 * Can't necessarily enforce alignment - and don't need it - when using
455 * the spinlock based fallback implementation. Therefore only assert when
456 * not using it.
457 */
458#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
460#endif
462}
463
464static inline uint64
466{
467#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
469#endif
470 return pg_atomic_read_u64_impl(ptr);
471}
472
473static inline uint64
475{
476#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
478#endif
479 return pg_atomic_read_membarrier_u64_impl(ptr);
480}
481
482static inline void
484{
485#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
487#endif
489}
490
491static inline void
493{
494#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
496#endif
497 pg_atomic_write_membarrier_u64_impl(ptr, val);
498}
499
500static inline uint64
502{
503#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
505#endif
506 return pg_atomic_exchange_u64_impl(ptr, newval);
507}
508
509static inline bool
511 uint64 *expected, uint64 newval)
512{
513#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
515#endif
516 return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
517}
518
519static inline uint64
521{
522#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
524#endif
525 return pg_atomic_fetch_add_u64_impl(ptr, add_);
526}
527
528static inline uint64
530{
531#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
533#endif
534 Assert(sub_ != PG_INT64_MIN);
535 return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
536}
537
538static inline uint64
540{
541#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
543#endif
544 return pg_atomic_fetch_and_u64_impl(ptr, and_);
545}
546
547static inline uint64
549{
550#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
552#endif
553 return pg_atomic_fetch_or_u64_impl(ptr, or_);
554}
555
556static inline uint64
558{
559#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
561#endif
562 return pg_atomic_add_fetch_u64_impl(ptr, add_);
563}
564
565static inline uint64
567{
568#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
570#endif
571 Assert(sub_ != PG_INT64_MIN);
572 return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
573}
574
575/*
576 * Monotonically advance the given variable using only atomic operations until
577 * it's at least the target value. Returns the latest value observed, which
578 * may or may not be the target value.
579 *
580 * Full barrier semantics (even when value is unchanged).
581 */
582static inline uint64
584{
585 uint64 currval;
586
587#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
589#endif
590
591 currval = pg_atomic_read_u64_impl(ptr);
592 if (currval >= target)
593 {
595 return currval;
596 }
597
598 while (currval < target)
599 {
600 if (pg_atomic_compare_exchange_u64(ptr, &currval, target))
601 return target;
602 }
603
604 return currval;
605}
606
607#undef INSIDE_ATOMICS_H
608
609#endif /* ATOMICS_H */
static bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: arch-ppc.h:80
static uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: arch-ppc.h:131
uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.c:62
void pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
Definition: atomics.c:24
bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 *expected, uint64 newval)
Definition: atomics.c:34
static uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
Definition: atomics.h:394
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:347
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:483
static void pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:205
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
Definition: atomics.h:408
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:437
#define pg_memory_barrier()
Definition: atomics.h:141
static uint32 pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:256
static uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:379
static bool pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 *expected, uint64 newval)
Definition: atomics.h:510
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:293
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:219
static uint64 pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:474
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:364
static uint32 pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:422
static uint64 pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target)
Definition: atomics.h:583
static uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.h:520
static bool pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:181
static uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
Definition: atomics.h:566
static bool pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:194
static void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:274
static void pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:313
static uint64 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
Definition: atomics.h:539
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:237
static uint64 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
Definition: atomics.h:548
static uint64 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.h:557
static uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
Definition: atomics.h:328
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:451
static void pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:492
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:465
static uint64 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
Definition: atomics.h:529
static uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
Definition: atomics.h:501
static void pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:168
#define AssertPointerAlignment(ptr, bndr)
Definition: c.h:894
int64_t int64
Definition: c.h:536
int32_t int32
Definition: c.h:535
#define PG_INT64_MIN
Definition: c.h:597
uint64_t uint64
Definition: c.h:540
uint32_t uint32
Definition: c.h:539
static uint32 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 newval)
Definition: generic-msvc.h:61
static void pg_atomic_unlocked_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: generic.h:64
static uint64 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
Definition: generic.h:319
static void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: generic.h:55
static void pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: generic.h:288
static uint32 pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
Definition: generic.h:46
static void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
Definition: generic.h:151
#define newval
Assert(PointerIsAligned(start, uint64))
long val
Definition: informix.c:689