1/*-------------------------------------------------------------------------
2 *
3 * lock.c
4 * POSTGRES primary lock mechanism
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/storage/lmgr/lock.c
12 *
13 * NOTES
14 * A lock table is a shared memory hash table. When
15 * a process tries to acquire a lock of a type that conflicts
16 * with existing locks, it is put to sleep using the routines
17 * in storage/lmgr/proc.c.
18 *
19 * For the most part, this code should be invoked via lmgr.c
20 * or another lock-management module, not directly.
21 *
22 * Interface:
23 *
24 * LockManagerShmemInit(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 * LockAcquire(), LockRelease(), LockReleaseAll(),
26 * LockCheckConflicts(), GrantLock()
27 *
28 *-------------------------------------------------------------------------
29 */
30#include "postgres.h"
31
32#include <signal.h>
33#include <unistd.h>
34
35#include "access/transam.h"
36#include "access/twophase.h"
37#include "access/twophase_rmgr.h"
38#include "access/xlog.h"
39#include "access/xlogutils.h"
40#include "miscadmin.h"
41#include "pg_trace.h"
42#include "storage/lmgr.h"
43#include "storage/proc.h"
44#include "storage/procarray.h"
45#include "storage/spin.h"
46#include "storage/standby.h"
47#include "utils/memutils.h"
48#include "utils/ps_status.h"
49#include "utils/resowner.h"
50
51
52/* GUC variables */
53int max_locks_per_xact; /* used to set the lock table size */
54bool log_lock_failures = false;
55
56#define NLOCKENTS() \
57 mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
58
59
60/*
61 * Data structures defining the semantics of the standard lock methods.
62 *
63 * The conflict table defines the semantics of the various lock modes.
64 */
65static const LOCKMASK LockConflicts[] = {
66 0,
67
68 /* AccessShareLock */
69 LOCKBIT_ON(AccessExclusiveLock),
70
71 /* RowShareLock */
72 LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
73
74 /* RowExclusiveLock */
75 LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
76 LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
77
78 /* ShareUpdateExclusiveLock */
79 LOCKBIT_ON(ShareUpdateExclusiveLock) |
80 LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
81 LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
82
83 /* ShareLock */
84 LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
85 LOCKBIT_ON(ShareRowExclusiveLock) |
86 LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
87
88 /* ShareRowExclusiveLock */
89 LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
90 LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
91 LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
92
93 /* ExclusiveLock */
94 LOCKBIT_ON(RowShareLock) |
95 LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
96 LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
97 LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
98
99 /* AccessExclusiveLock */
100 LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
101 LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
102 LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
103 LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
104
105};
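/*
 * Illustrative note (not part of lock.c): each entry above is a bitmask
 * indexed by lock mode, so a conflict test is a single AND.  For example:
 *
 *     (LockConflicts[RowExclusiveLock] & LOCKBIT_ON(ShareLock)) != 0     -- conflict
 *     (LockConflicts[RowExclusiveLock] & LOCKBIT_ON(RowShareLock)) == 0  -- no conflict
 *
 * This is exactly the test DoLockModesConflict() performs further down.
 */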
106
107/* Names of lock modes, for debug printouts */
108static const char *const lock_mode_names[] =
109{
110 "INVALID",
111 "AccessShareLock",
112 "RowShareLock",
113 "RowExclusiveLock",
114 "ShareUpdateExclusiveLock",
115 "ShareLock",
116 "ShareRowExclusiveLock",
117 "ExclusiveLock",
118 "AccessExclusiveLock"
119};
120
121#ifndef LOCK_DEBUG
122static bool Dummy_trace = false;
123#endif
124
125static const LockMethodData default_lockmethod = {
126 MaxLockMode,
127 LockConflicts,
128 lock_mode_names,
129#ifdef LOCK_DEBUG
130 &Trace_locks
131#else
132 &Dummy_trace
133#endif
134};
135
136static const LockMethodData user_lockmethod = {
137 MaxLockMode,
138 LockConflicts,
139 lock_mode_names,
140#ifdef LOCK_DEBUG
141 &Trace_userlocks
142#else
143 &Dummy_trace
144#endif
145};
146
147/*
148 * map from lock method id to the lock table data structures
149 */
150static const LockMethod LockMethods[] = {
151 NULL,
152 &default_lockmethod,
153 &user_lockmethod
154};
155
156
157/* Record that's written to 2PC state file when a lock is persisted */
158typedef struct TwoPhaseLockRecord
159{
160 LOCKTAG locktag;
161 LOCKMODE lockmode;
162} TwoPhaseLockRecord;
163
164
165/*
166 * Count of the number of fast path lock slots we believe to be used. This
167 * might be higher than the real number if another backend has transferred
168 * our locks to the primary lock table, but it can never be lower than the
169 * real value, since only we can acquire locks on our own behalf.
170 *
171 * XXX Allocate a static array of the maximum size. We could use a pointer
172 * and then allocate just the right size to save a couple kB, but then we
173 * would have to initialize that, while for the static array that happens
174 * automatically. Doesn't seem worth the extra complexity.
175 */
176static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX];
177
178/*
179 * Flag to indicate if the relation extension lock is held by this backend.
180 * This flag is used to ensure that while holding the relation extension lock
181 * we don't try to acquire a heavyweight lock on any other object. This
182 * restriction implies that the relation extension lock won't ever participate
183 * in the deadlock cycle because we can never wait for any other heavyweight
184 * lock after acquiring this lock.
185 *
186 * Such a restriction is okay for relation extension locks as unlike other
187 * heavyweight locks these are not held till the transaction end. These are
188 * taken for a short duration to extend a particular relation and then
189 * released.
190 */
191static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
192
193/*
194 * Number of fast-path locks per backend - size of the arrays in PGPROC.
195 * This is set only once during start, before initializing shared memory,
196 * and remains constant after that.
197 *
198 * We set the limit based on max_locks_per_transaction GUC, because that's
199 * the best information about expected number of locks per backend we have.
200 * See InitializeFastPathLocks() for details.
201 */
202int FastPathLockGroupsPerBackend = 0;
203
204/*
205 * Macros to calculate the fast-path group and index for a relation.
206 *
207 * The formula is a simple hash function, designed to spread the OIDs a bit,
208 * so that even contiguous values end up in different groups. In most cases
209 * there will be gaps anyway, but the multiplication should help a bit.
210 *
211 * The selected constant (49157) is a prime not too close to 2^k, and it's
212 * small enough to not cause overflows (in 64-bit).
213 *
214 * We can assume that FastPathLockGroupsPerBackend is a power-of-two per
215 * InitializeFastPathLocks().
216 */
217#define FAST_PATH_REL_GROUP(rel) \
218 (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
219
220/*
221 * Given the group/slot indexes, calculate the slot index in the whole array
222 * of fast-path lock slots.
223 */
224#define FAST_PATH_SLOT(group, index) \
225 (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
226 AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
227 ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
228
229/*
230 * Given a slot index (into the whole per-backend array), calculated using
231 * the FAST_PATH_SLOT macro, split it into group and index (in the group).
232 */
233#define FAST_PATH_GROUP(index) \
234 (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
235 ((index) / FP_LOCK_SLOTS_PER_GROUP))
236#define FAST_PATH_INDEX(index) \
237 (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
238 ((index) % FP_LOCK_SLOTS_PER_GROUP))
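/*
 * Illustrative sketch (not part of lock.c): a relation OID is hashed to a
 * group, a free slot is chosen within that group, and the two are combined
 * into a flat index into the per-backend fast-path arrays.  Assuming
 * FastPathLockGroupsPerBackend == 4 and FP_LOCK_SLOTS_PER_GROUP == 16:
 *
 *     group = ((uint64) relid * 49157) & 3;   // FAST_PATH_REL_GROUP(relid)
 *     slot  = group * 16 + index;             // FAST_PATH_SLOT(group, index)
 *     // FAST_PATH_GROUP(slot) == group, FAST_PATH_INDEX(slot) == index
 */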
239
240/* Macros for manipulating proc->fpLockBits */
241#define FAST_PATH_BITS_PER_SLOT 3
242#define FAST_PATH_LOCKNUMBER_OFFSET 1
243#define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
244#define FAST_PATH_BITS(proc, n) (proc)->fpLockBits[FAST_PATH_GROUP(n)]
245#define FAST_PATH_GET_BITS(proc, n) \
246 ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
247#define FAST_PATH_BIT_POSITION(n, l) \
248 (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
249 AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
250 AssertMacro((n) < FastPathLockSlotsPerBackend()), \
251 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
252#define FAST_PATH_SET_LOCKMODE(proc, n, l) \
253 FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
254#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
255 FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
256#define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
257 (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
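/*
 * Illustrative note (not part of lock.c): with FAST_PATH_BITS_PER_SLOT = 3
 * and FAST_PATH_LOCKNUMBER_OFFSET = 1, each slot uses three bits of its
 * group's fpLockBits word, one per fast-path-eligible mode (AccessShareLock,
 * RowShareLock, RowExclusiveLock).  For a slot whose in-group index is 2,
 * the bits used are positions 6..8, and
 *
 *     FAST_PATH_SET_LOCKMODE(proc, n, AccessShareLock)
 *
 * sets bit (AccessShareLock - 1 + 3 * 2) = 6 of FAST_PATH_BITS(proc, n).
 */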
258
259/*
260 * The fast-path lock mechanism is concerned only with relation locks on
261 * unshared relations by backends bound to a database. The fast-path
262 * mechanism exists mostly to accelerate acquisition and release of locks
263 * that rarely conflict. Because ShareUpdateExclusiveLock is
264 * self-conflicting, it can't use the fast-path mechanism; but it also does
265 * not conflict with any of the locks that do, so we can ignore it completely.
266 */
267#define EligibleForRelationFastPath(locktag, mode) \
268 ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
269 (locktag)->locktag_type == LOCKTAG_RELATION && \
270 (locktag)->locktag_field1 == MyDatabaseId && \
271 MyDatabaseId != InvalidOid && \
272 (mode) < ShareUpdateExclusiveLock)
273#define ConflictsWithRelationFastPath(locktag, mode) \
274 ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
275 (locktag)->locktag_type == LOCKTAG_RELATION && \
276 (locktag)->locktag_field1 != InvalidOid && \
277 (mode) > ShareUpdateExclusiveLock)
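/*
 * Illustrative note (not part of lock.c): given the numeric ordering of the
 * standard lock modes, the fast-path-eligible modes are AccessShareLock,
 * RowShareLock and RowExclusiveLock (all < ShareUpdateExclusiveLock), while
 * the "strong" modes that shut the fast path off are ShareLock,
 * ShareRowExclusiveLock, ExclusiveLock and AccessExclusiveLock
 * (all > ShareUpdateExclusiveLock).  ShareUpdateExclusiveLock itself is in
 * neither set, as explained above.
 */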
278
279static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
280static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
281static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
282 const LOCKTAG *locktag, uint32 hashcode);
283static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
284
285/*
286 * To make the fast-path lock mechanism work, we must have some way of
287 * preventing the use of the fast-path when a conflicting lock might be present.
288 * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
289 * and maintain an integer count of the number of "strong" lockers
290 * in each partition. When any "strong" lockers are present (which is
291 * hopefully not very often), the fast-path mechanism can't be used, and we
292 * must fall back to the slower method of pushing matching locks directly
293 * into the main lock tables.
294 *
295 * The deadlock detector does not know anything about the fast path mechanism,
296 * so any locks that might be involved in a deadlock must be transferred from
297 * the fast-path queues to the main lock table.
298 */
299
300#define FAST_PATH_STRONG_LOCK_HASH_BITS 10
301#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
302 (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
303#define FastPathStrongLockHashPartition(hashcode) \
304 ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
305
306typedef struct
307{
308 slock_t mutex;
309 uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
310} FastPathStrongRelationLockData;
311
312static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
313
314
315/*
316 * Pointers to hash tables containing lock state
317 *
318 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
319 * shared memory; LockMethodLocalHash is local to each backend.
320 */
321static HTAB *LockMethodLockHash;
322static HTAB *LockMethodProcLockHash;
323static HTAB *LockMethodLocalHash;
324
325
326/* private state for error cleanup */
327static LOCALLOCK *StrongLockInProgress;
328static LOCALLOCK *awaitedLock;
329static ResourceOwner awaitedOwner;
330
331
332#ifdef LOCK_DEBUG
333
334/*------
335 * The following configuration options are available for lock debugging:
336 *
337 * TRACE_LOCKS -- give a bunch of output what's going on in this file
338 * TRACE_USERLOCKS -- same but for user locks
339 * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
340 * (use to avoid output on system tables)
341 * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
342 * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
343 *
344 * Furthermore, but in storage/lmgr/lwlock.c:
345 * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
346 *
347 * Define LOCK_DEBUG at compile time to get all these enabled.
348 * --------
349 */
350
351int Trace_lock_oidmin = FirstNormalObjectId;
352bool Trace_locks = false;
353bool Trace_userlocks = false;
354int Trace_lock_table = 0;
355bool Debug_deadlocks = false;
356
357
358inline static bool
359LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
360{
361 return
362 (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
363 ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
364 || (Trace_lock_table &&
365 (tag->locktag_field2 == Trace_lock_table));
366}
367
368
369inline static void
370LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
371{
372 if (LOCK_DEBUG_ENABLED(&lock->tag))
373 elog(LOG,
374 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
375 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
376 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
377 where, lock,
378 lock->tag.locktag_field1, lock->tag.locktag_field2,
379 lock->tag.locktag_field3, lock->tag.locktag_field4,
380 lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
381 lock->grantMask,
382 lock->requested[1], lock->requested[2], lock->requested[3],
383 lock->requested[4], lock->requested[5], lock->requested[6],
384 lock->requested[7], lock->nRequested,
385 lock->granted[1], lock->granted[2], lock->granted[3],
386 lock->granted[4], lock->granted[5], lock->granted[6],
387 lock->granted[7], lock->nGranted,
388 dclist_count(&lock->waitProcs),
389 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
390}
391
392
393inline static void
394PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
395{
396 if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
397 elog(LOG,
398 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
399 where, proclockP, proclockP->tag.myLock,
400 PROCLOCK_LOCKMETHOD(*(proclockP)),
401 proclockP->tag.myProc, (int) proclockP->holdMask);
402}
403#else /* not LOCK_DEBUG */
404
405#define LOCK_PRINT(where, lock, type) ((void) 0)
406#define PROCLOCK_PRINT(where, proclockP) ((void) 0)
407#endif /* not LOCK_DEBUG */
408
409
410static uint32 proclock_hash(const void *key, Size keysize);
411static void RemoveLocalLock(LOCALLOCK *locallock);
412static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
413 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
414static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
415static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
416static void FinishStrongLockAcquire(void);
417static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
418static void waitonlock_error_callback(void *arg);
419static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
420static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
421static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
422 PROCLOCK *proclock, LockMethod lockMethodTable);
423static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
424 LockMethod lockMethodTable, uint32 hashcode,
425 bool wakeupNeeded);
426static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
427 LOCKTAG *locktag, LOCKMODE lockmode,
428 bool decrement_strong_lock_count);
429static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
430 BlockedProcsData *data);
431
432
433/*
434 * Initialize the lock manager's shmem data structures.
435 *
436 * This is called from CreateSharedMemoryAndSemaphores(), which see for more
437 * comments. In the normal postmaster case, the shared hash tables are
438 * created here, and backends inherit pointers to them via fork(). In the
439 * EXEC_BACKEND case, each backend re-executes this code to obtain pointers to
440 * the already existing shared hash tables. In either case, each backend must
441 * also call InitLockManagerAccess() to create the locallock hash table.
442 */
443void
444LockManagerShmemInit(void)
445{
446 HASHCTL info;
447 int64 init_table_size,
448 max_table_size;
449 bool found;
450
451 /*
452 * Compute init/max size to request for lock hashtables. Note these
453 * calculations must agree with LockManagerShmemSize!
454 */
455 max_table_size = NLOCKENTS();
456 init_table_size = max_table_size / 2;
457
458 /*
459 * Allocate hash table for LOCK structs. This stores per-locked-object
460 * information.
461 */
462 info.keysize = sizeof(LOCKTAG);
463 info.entrysize = sizeof(LOCK);
464 info.num_partitions = NUM_LOCK_PARTITIONS;
465
466 LockMethodLockHash = ShmemInitHash("LOCK hash",
467 init_table_size,
468 max_table_size,
469 &info,
470 HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
471
472 /* Assume an average of 2 holders per lock */
473 max_table_size *= 2;
474 init_table_size *= 2;
475
476 /*
477 * Allocate hash table for PROCLOCK structs. This stores
478 * per-lock-per-holder information.
479 */
480 info.keysize = sizeof(PROCLOCKTAG);
481 info.entrysize = sizeof(PROCLOCK);
482 info.hash = proclock_hash;
483 info.num_partitions = NUM_LOCK_PARTITIONS;
484
485 LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
486 init_table_size,
487 max_table_size,
488 &info,
489 HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
490
491 /*
492 * Allocate fast-path structures.
493 */
494 FastPathStrongRelationLocks =
495 ShmemInitStruct("Fast Path Strong Relation Lock Data",
496 sizeof(FastPathStrongRelationLockData), &found);
497 if (!found)
498 SpinLockInit(&FastPathStrongRelationLocks->mutex);
499}
500
501/*
502 * Initialize the lock manager's backend-private data structures.
503 */
504void
505InitLockManagerAccess(void)
506{
507 /*
508 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
509 * counts and resource owner information.
510 */
511 HASHCTL info;
512
513 info.keysize = sizeof(LOCALLOCKTAG);
514 info.entrysize = sizeof(LOCALLOCK);
515
516 LockMethodLocalHash = hash_create("LOCALLOCK hash",
517 16,
518 &info,
519 HASH_ELEM | HASH_BLOBS);
520}
521
522
523/*
524 * Fetch the lock method table associated with a given lock
525 */
526LockMethod
527GetLocksMethodTable(const LOCK *lock)
528{
529 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
530
531 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
532 return LockMethods[lockmethodid];
533}
534
535/*
536 * Fetch the lock method table associated with a given locktag
537 */
538LockMethod
539GetLockTagsMethodTable(const LOCKTAG *locktag)
540{
541 LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
542
543 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
544 return LockMethods[lockmethodid];
545}
546
547
548/*
549 * Compute the hash code associated with a LOCKTAG.
550 *
551 * To avoid unnecessary recomputations of the hash code, we try to do this
552 * just once per function, and then pass it around as needed. Aside from
553 * passing the hashcode to hash_search_with_hash_value(), we can extract
554 * the lock partition number from the hashcode.
555 */
556uint32
557LockTagHashCode(const LOCKTAG *locktag)
558{
559 return get_hash_value(LockMethodLockHash, locktag);
560}
561
562/*
563 * Compute the hash code associated with a PROCLOCKTAG.
564 *
565 * Because we want to use just one set of partition locks for both the
566 * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
567 * fall into the same partition number as their associated LOCKs.
568 * dynahash.c expects the partition number to be the low-order bits of
569 * the hash code, and therefore a PROCLOCKTAG's hash code must have the
570 * same low-order bits as the associated LOCKTAG's hash code. We achieve
571 * this with this specialized hash function.
572 */
573static uint32
574proclock_hash(const void *key, Size keysize)
575{
576 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
577 uint32 lockhash;
578 Datum procptr;
579
580 Assert(keysize == sizeof(PROCLOCKTAG));
581
582 /* Look into the associated LOCK object, and compute its hash code */
583 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
584
585 /*
586 * To make the hash code also depend on the PGPROC, we xor the proc
587 * struct's address into the hash code, left-shifted so that the
588 * partition-number bits don't change. Since this is only a hash, we
589 * don't care if we lose high-order bits of the address; use an
590 * intermediate variable to suppress cast-pointer-to-int warnings.
591 */
592 procptr = PointerGetDatum(proclocktag->myProc);
593 lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
594
595 return lockhash;
596}
597
598/*
599 * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
600 * for its underlying LOCK.
601 *
602 * We use this just to avoid redundant calls of LockTagHashCode().
603 */
604static inline uint32
605ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
606{
607 uint32 lockhash = hashcode;
608 Datum procptr;
609
610 /*
611 * This must match proclock_hash()!
612 */
613 procptr = PointerGetDatum(proclocktag->myProc);
614 lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
615
616 return lockhash;
617}
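/*
 * Illustrative note (not part of lock.c): because the PGPROC pointer is
 * XORed in only after being shifted left by LOG2_NUM_LOCK_PARTITIONS, the
 * low-order partition bits of a PROCLOCK's hash equal those of its LOCK,
 * i.e.
 *
 *     LockHashPartition(ProcLockHashCode(&proclocktag, lockhash))
 *         == LockHashPartition(lockhash)
 *
 * so a LOCK and its PROCLOCKs are always covered by the same partition lock.
 */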
618
619/*
620 * Given two lock modes, return whether they would conflict.
621 */
622bool
623DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
624{
625 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
626
627 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
628 return true;
629
630 return false;
631}
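/*
 * Illustrative usage (not part of lock.c), per the conflict table above:
 *
 *     Assert(DoLockModesConflict(RowExclusiveLock, ShareLock));
 *     Assert(!DoLockModesConflict(RowExclusiveLock, RowShareLock));
 */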
632
633/*
634 * LockHeldByMe -- test whether lock 'locktag' is held by the current
635 * transaction
636 *
637 * Returns true if current transaction holds a lock on 'tag' of mode
638 * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
639 * ("Stronger" is defined as "numerically higher", which is a bit
640 * semantically dubious but is OK for the purposes we use this for.)
641 */
642bool
643LockHeldByMe(const LOCKTAG *locktag,
644 LOCKMODE lockmode, bool orstronger)
645{
646 LOCALLOCKTAG localtag;
647 LOCALLOCK *locallock;
648
649 /*
650 * See if there is a LOCALLOCK entry for this lock and lockmode
651 */
652 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
653 localtag.lock = *locktag;
654 localtag.mode = lockmode;
655
656 locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
657 &localtag,
658 HASH_FIND, NULL);
659
660 if (locallock && locallock->nLocks > 0)
661 return true;
662
663 if (orstronger)
664 {
665 LOCKMODE slockmode;
666
667 for (slockmode = lockmode + 1;
668 slockmode <= MaxLockMode;
669 slockmode++)
670 {
671 if (LockHeldByMe(locktag, slockmode, false))
672 return true;
673 }
674 }
675
676 return false;
677}
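/*
 * Illustrative usage (not part of lock.c): a hypothetical caller asserting
 * that it already holds at least RowExclusiveLock on a relation "relid"
 * could do
 *
 *     LOCKTAG tag;
 *
 *     SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 *     Assert(LockHeldByMe(&tag, RowExclusiveLock, true));
 *
 * Real callers normally go through the wrappers in lmgr.c instead.
 */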
678
679#ifdef USE_ASSERT_CHECKING
680/*
681 * GetLockMethodLocalHash -- return the hash of local locks, for modules that
682 * evaluate assertions based on all locks held.
683 */
684HTAB *
685GetLockMethodLocalHash(void)
686{
687 return LockMethodLocalHash;
688}
689#endif
690
691/*
692 * LockHasWaiters -- look up 'locktag' and check if releasing this
693 * lock would wake up other processes waiting for it.
694 */
695bool
696LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
697{
698 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
699 LockMethod lockMethodTable;
700 LOCALLOCKTAG localtag;
701 LOCALLOCK *locallock;
702 LOCK *lock;
703 PROCLOCK *proclock;
704 LWLock *partitionLock;
705 bool hasWaiters = false;
706
707 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
708 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
709 lockMethodTable = LockMethods[lockmethodid];
710 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
711 elog(ERROR, "unrecognized lock mode: %d", lockmode);
712
713#ifdef LOCK_DEBUG
714 if (LOCK_DEBUG_ENABLED(locktag))
715 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
716 locktag->locktag_field1, locktag->locktag_field2,
717 lockMethodTable->lockModeNames[lockmode]);
718#endif
719
720 /*
721 * Find the LOCALLOCK entry for this lock and lockmode
722 */
723 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
724 localtag.lock = *locktag;
725 localtag.mode = lockmode;
726
727 locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
728 &localtag,
729 HASH_FIND, NULL);
730
731 /*
732 * let the caller print its own error message, too. Do not ereport(ERROR).
733 */
734 if (!locallock || locallock->nLocks <= 0)
735 {
736 elog(WARNING, "you don't own a lock of type %s",
737 lockMethodTable->lockModeNames[lockmode]);
738 return false;
739 }
740
741 /*
742 * Check the shared lock table.
743 */
744 partitionLock = LockHashPartitionLock(locallock->hashcode);
745
746 LWLockAcquire(partitionLock, LW_SHARED);
747
748 /*
749 * We don't need to re-find the lock or proclock, since we kept their
750 * addresses in the locallock table, and they couldn't have been removed
751 * while we were holding a lock on them.
752 */
753 lock = locallock->lock;
754 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
755 proclock = locallock->proclock;
756 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
757
758 /*
759 * Double-check that we are actually holding a lock of the type we want to
760 * release.
761 */
762 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
763 {
764 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
765 LWLockRelease(partitionLock);
766 elog(WARNING, "you don't own a lock of type %s",
767 lockMethodTable->lockModeNames[lockmode]);
768 RemoveLocalLock(locallock);
769 return false;
770 }
771
772 /*
773 * Do the checking.
774 */
775 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
776 hasWaiters = true;
777
778 LWLockRelease(partitionLock);
779
780 return hasWaiters;
781}
782
783/*
784 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
785 * set lock if/when no conflicts.
786 *
787 * Inputs:
788 * locktag: unique identifier for the lockable object
789 * lockmode: lock mode to acquire
790 * sessionLock: if true, acquire lock for session not current transaction
791 * dontWait: if true, don't wait to acquire lock
792 *
793 * Returns one of:
794 * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
795 * LOCKACQUIRE_OK lock successfully acquired
796 * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
797 * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
798 *
799 * In the normal case where dontWait=false and the caller doesn't need to
800 * distinguish a freshly acquired lock from one already taken earlier in
801 * this same transaction, there is no need to examine the return value.
802 *
803 * Side Effects: The lock is acquired and recorded in lock tables.
804 *
805 * NOTE: if we wait for the lock, there is no way to abort the wait
806 * short of aborting the transaction.
807 */
808LockAcquireResult
809LockAcquire(const LOCKTAG *locktag,
810 LOCKMODE lockmode,
811 bool sessionLock,
812 bool dontWait)
813{
814 return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
815 true, NULL, false);
816}
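/*
 * Illustrative usage (not part of lock.c): most callers reach LockAcquire()
 * through lmgr.c.  For example, LockRelationOid() builds a relation locktag
 * and acquires it, roughly like
 *
 *     LOCKTAG tag;
 *
 *     SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 *     (void) LockAcquire(&tag, AccessShareLock, false, false);
 *
 * where "relid" stands for the relation being locked (the real wrapper also
 * handles sinval absorption via MarkLockClear()).
 */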
817
818/*
819 * LockAcquireExtended - allows us to specify additional options
820 *
821 * reportMemoryError specifies whether a lock request that fills the lock
822 * table should generate an ERROR or not. Passing "false" allows the caller
823 * to attempt to recover from lock-table-full situations, perhaps by forcibly
824 * canceling other lock holders and then retrying. Note, however, that the
825 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
826 * in combination with dontWait = true, as the cause of failure couldn't be
827 * distinguished.
828 *
829 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
830 * table entry if a lock is successfully acquired, or NULL if not.
831 *
832 * logLockFailure indicates whether to log details when a lock acquisition
833 * fails with dontWait = true.
834 */
835LockAcquireResult
836LockAcquireExtended(const LOCKTAG *locktag,
837 LOCKMODE lockmode,
838 bool sessionLock,
839 bool dontWait,
840 bool reportMemoryError,
841 LOCALLOCK **locallockp,
842 bool logLockFailure)
843{
844 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
845 LockMethod lockMethodTable;
846 LOCALLOCKTAG localtag;
847 LOCALLOCK *locallock;
848 LOCK *lock;
849 PROCLOCK *proclock;
850 bool found;
851 ResourceOwner owner;
852 uint32 hashcode;
853 LWLock *partitionLock;
854 bool found_conflict;
855 ProcWaitStatus waitResult;
856 bool log_lock = false;
857
858 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
859 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
860 lockMethodTable = LockMethods[lockmethodid];
861 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
862 elog(ERROR, "unrecognized lock mode: %d", lockmode);
863
864 if (RecoveryInProgress() && !InRecovery &&
865 (locktag->locktag_type == LOCKTAG_OBJECT ||
866 locktag->locktag_type == LOCKTAG_RELATION) &&
867 lockmode > RowExclusiveLock)
868 ereport(ERROR,
869 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
870 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
871 lockMethodTable->lockModeNames[lockmode]),
872 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
873
874#ifdef LOCK_DEBUG
875 if (LOCK_DEBUG_ENABLED(locktag))
876 elog(LOG, "LockAcquire: lock [%u,%u] %s",
877 locktag->locktag_field1, locktag->locktag_field2,
878 lockMethodTable->lockModeNames[lockmode]);
879#endif
880
881 /* Identify owner for lock */
882 if (sessionLock)
883 owner = NULL;
884 else
885 owner = CurrentResourceOwner;
886
887 /*
888 * Find or create a LOCALLOCK entry for this lock and lockmode
889 */
890 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
891 localtag.lock = *locktag;
892 localtag.mode = lockmode;
893
894 locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
895 &localtag,
896 HASH_ENTER, &found);
897
898 /*
899 * if it's a new locallock object, initialize it
900 */
901 if (!found)
902 {
903 locallock->lock = NULL;
904 locallock->proclock = NULL;
905 locallock->hashcode = LockTagHashCode(&(localtag.lock));
906 locallock->nLocks = 0;
907 locallock->holdsStrongLockCount = false;
908 locallock->lockCleared = false;
909 locallock->numLockOwners = 0;
910 locallock->maxLockOwners = 8;
911 locallock->lockOwners = NULL; /* in case next line fails */
912 locallock->lockOwners = (LOCALLOCKOWNER *)
913 MemoryContextAlloc(TopMemoryContext,
914 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
915 }
916 else
917 {
918 /* Make sure there will be room to remember the lock */
919 if (locallock->numLockOwners >= locallock->maxLockOwners)
920 {
921 int newsize = locallock->maxLockOwners * 2;
922
923 locallock->lockOwners = (LOCALLOCKOWNER *)
924 repalloc(locallock->lockOwners,
925 newsize * sizeof(LOCALLOCKOWNER));
926 locallock->maxLockOwners = newsize;
927 }
928 }
929 hashcode = locallock->hashcode;
930
931 if (locallockp)
932 *locallockp = locallock;
933
934 /*
935 * If we already hold the lock, we can just increase the count locally.
936 *
937 * If lockCleared is already set, caller need not worry about absorbing
938 * sinval messages related to the lock's object.
939 */
940 if (locallock->nLocks > 0)
941 {
942 GrantLockLocal(locallock, owner);
943 if (locallock->lockCleared)
944 return LOCKACQUIRE_ALREADY_CLEAR;
945 else
946 return LOCKACQUIRE_ALREADY_HELD;
947 }
948
949 /*
950 * We don't acquire any other heavyweight lock while holding the relation
951 * extension lock. We do allow to acquire the same relation extension
952 * lock more than once but that case won't reach here.
953 */
954 Assert(!IsRelationExtensionLockHeld);
955
956 /*
957 * Prepare to emit a WAL record if acquisition of this lock needs to be
958 * replayed in a standby server.
959 *
960 * Here we prepare to log; after lock is acquired we'll issue log record.
961 * This arrangement simplifies error recovery in case the preparation step
962 * fails.
963 *
964 * Only AccessExclusiveLocks can conflict with lock types that read-only
965 * transactions can acquire in a standby server. Make sure this definition
966 * matches the one in GetRunningTransactionLocks().
967 */
968 if (lockmode >= AccessExclusiveLock &&
969 locktag->locktag_type == LOCKTAG_RELATION &&
970 !RecoveryInProgress() &&
971 XLogStandbyInfoActive())
972 {
973 LogAccessExclusiveLockPrepare();
974 log_lock = true;
975 }
976
977 /*
978 * Attempt to take lock via fast path, if eligible. But if we remember
979 * having filled up the fast path array, we don't attempt to make any
980 * further use of it until we release some locks. It's possible that some
981 * other backend has transferred some of those locks to the shared hash
982 * table, leaving space free, but it's not worth acquiring the LWLock just
983 * to check. It's also possible that we're acquiring a second or third
984 * lock type on a relation we have already locked using the fast-path, but
985 * for now we don't worry about that case either.
986 */
987 if (EligibleForRelationFastPath(locktag, lockmode) &&
988 FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] < FP_LOCK_SLOTS_PER_GROUP)
989 {
990 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
991 bool acquired;
992
993 /*
994 * LWLockAcquire acts as a memory sequencing point, so it's safe to
995 * assume that any strong locker whose increment to
996 * FastPathStrongRelationLocks->counts becomes visible after we test
997 * it has yet to begin to transfer fast-path locks.
998 */
999 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
1000 if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
1001 acquired = false;
1002 else
1003 acquired = FastPathGrantRelationLock(locktag->locktag_field2,
1004 lockmode);
1005 LWLockRelease(&MyProc->fpInfoLock);
1006 if (acquired)
1007 {
1008 /*
1009 * The locallock might contain stale pointers to some old shared
1010 * objects; we MUST reset these to null before considering the
1011 * lock to be acquired via fast-path.
1012 */
1013 locallock->lock = NULL;
1014 locallock->proclock = NULL;
1015 GrantLockLocal(locallock, owner);
1016 return LOCKACQUIRE_OK;
1017 }
1018 }
1019
1020 /*
1021 * If this lock could potentially have been taken via the fast-path by
1022 * some other backend, we must (temporarily) disable further use of the
1023 * fast-path for this lock tag, and migrate any locks already taken via
1024 * this method to the main lock table.
1025 */
1026 if (ConflictsWithRelationFastPath(locktag, lockmode))
1027 {
1028 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1029
1030 BeginStrongLockAcquire(locallock, fasthashcode);
1031 if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1032 hashcode))
1033 {
1034 AbortStrongLockAcquire();
1035 if (locallock->nLocks == 0)
1036 RemoveLocalLock(locallock);
1037 if (locallockp)
1038 *locallockp = NULL;
1039 if (reportMemoryError)
1040 ereport(ERROR,
1041 (errcode(ERRCODE_OUT_OF_MEMORY),
1042 errmsg("out of shared memory"),
1043 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1044 else
1045 return LOCKACQUIRE_NOT_AVAIL;
1046 }
1047 }
1048
1049 /*
1050 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1051 * take it via the fast-path, either, so we've got to mess with the shared
1052 * lock table.
1053 */
1054 partitionLock = LockHashPartitionLock(hashcode);
1055
1056 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1057
1058 /*
1059 * Find or create lock and proclock entries with this tag
1060 *
1061 * Note: if the locallock object already existed, it might have a pointer
1062 * to the lock already ... but we should not assume that that pointer is
1063 * valid, since a lock object with zero hold and request counts can go
1064 * away anytime. So we have to use SetupLockInTable() to recompute the
1065 * lock and proclock pointers, even if they're already set.
1066 */
1067 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1068 hashcode, lockmode);
1069 if (!proclock)
1070 {
1071 AbortStrongLockAcquire();
1072 LWLockRelease(partitionLock);
1073 if (locallock->nLocks == 0)
1074 RemoveLocalLock(locallock);
1075 if (locallockp)
1076 *locallockp = NULL;
1077 if (reportMemoryError)
1078 ereport(ERROR,
1079 (errcode(ERRCODE_OUT_OF_MEMORY),
1080 errmsg("out of shared memory"),
1081 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1082 else
1083 return LOCKACQUIRE_NOT_AVAIL;
1084 }
1085 locallock->proclock = proclock;
1086 lock = proclock->tag.myLock;
1087 locallock->lock = lock;
1088
1089 /*
1090 * If lock requested conflicts with locks requested by waiters, must join
1091 * wait queue. Otherwise, check for conflict with already-held locks.
1092 * (That's last because most complex check.)
1093 */
1094 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1095 found_conflict = true;
1096 else
1097 found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1098 lock, proclock);
1099
1100 if (!found_conflict)
1101 {
1102 /* No conflict with held or previously requested locks */
1103 GrantLock(lock, proclock, lockmode);
1104 waitResult = PROC_WAIT_STATUS_OK;
1105 }
1106 else
1107 {
1108 /*
1109 * Join the lock's wait queue. We call this even in the dontWait
1110 * case, because JoinWaitQueue() may discover that we can acquire the
1111 * lock immediately after all.
1112 */
1113 waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
1114 }
1115
1116 if (waitResult == PROC_WAIT_STATUS_ERROR)
1117 {
1118 /*
1119 * We're not getting the lock because a deadlock was detected already
1120 * while trying to join the wait queue, or because we would have to
1121 * wait but the caller requested no blocking.
1122 *
1123 * Undo the changes to shared entries before releasing the partition
1124 * lock.
1125 */
1126 AbortStrongLockAcquire();
1127
1128 if (proclock->holdMask == 0)
1129 {
1130 uint32 proclock_hashcode;
1131
1132 proclock_hashcode = ProcLockHashCode(&proclock->tag,
1133 hashcode);
1134 dlist_delete(&proclock->lockLink);
1135 dlist_delete(&proclock->procLink);
1136 if (!hash_search_with_hash_value(LockMethodProcLockHash,
1137 &(proclock->tag),
1138 proclock_hashcode,
1139 HASH_REMOVE,
1140 NULL))
1141 elog(PANIC, "proclock table corrupted");
1142 }
1143 else
1144 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1145 lock->nRequested--;
1146 lock->requested[lockmode]--;
1147 LOCK_PRINT("LockAcquire: did not join wait queue",
1148 lock, lockmode);
1149 Assert((lock->nRequested > 0) &&
1150 (lock->requested[lockmode] >= 0));
1151 Assert(lock->nGranted <= lock->nRequested);
1152 LWLockRelease(partitionLock);
1153 if (locallock->nLocks == 0)
1154 RemoveLocalLock(locallock);
1155
1156 if (dontWait)
1157 {
1158 /*
1159 * Log lock holders and waiters as a detail log message if
1160 * logLockFailure = true and lock acquisition fails with dontWait
1161 * = true
1162 */
1163 if (logLockFailure)
1164 {
1165 StringInfoData buf,
1166 lock_waiters_sbuf,
1167 lock_holders_sbuf;
1168 const char *modename;
1169 int lockHoldersNum = 0;
1170
1171 initStringInfo(&buf);
1172 initStringInfo(&lock_waiters_sbuf);
1173 initStringInfo(&lock_holders_sbuf);
1174
1175 DescribeLockTag(&buf, &locallock->tag.lock);
1176 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1177 lockmode);
1178
1179 /* Gather a list of all lock holders and waiters */
1180 LWLockAcquire(partitionLock, LW_SHARED);
1181 GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1182 &lock_waiters_sbuf, &lockHoldersNum);
1183 LWLockRelease(partitionLock);
1184
1185 ereport(LOG,
1186 (errmsg("process %d could not obtain %s on %s",
1187 MyProcPid, modename, buf.data),
1189 "Process holding the lock: %s, Wait queue: %s.",
1190 "Processes holding the lock: %s, Wait queue: %s.",
1191 lockHoldersNum,
1192 lock_holders_sbuf.data,
1193 lock_waiters_sbuf.data)));
1194
1195 pfree(buf.data);
1196 pfree(lock_holders_sbuf.data);
1197 pfree(lock_waiters_sbuf.data);
1198 }
1199 if (locallockp)
1200 *locallockp = NULL;
1201 return LOCKACQUIRE_NOT_AVAIL;
1202 }
1203 else
1204 {
1205 DeadLockReport();
1206 /* DeadLockReport() will not return */
1207 }
1208 }
1209
1210 /*
1211 * We are now in the lock queue, or the lock was already granted. If
1212 * queued, go to sleep.
1213 */
1214 if (waitResult == PROC_WAIT_STATUS_WAITING)
1215 {
1216 Assert(!dontWait);
1217 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1218 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1219 LWLockRelease(partitionLock);
1220
1221 waitResult = WaitOnLock(locallock, owner);
1222
1223 /*
1224 * NOTE: do not do any material change of state between here and
1225 * return. All required changes in locktable state must have been
1226 * done when the lock was granted to us --- see notes in WaitOnLock.
1227 */
1228
1229 if (waitResult == PROC_WAIT_STATUS_ERROR)
1230 {
1231 /*
1232 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1233 * now.
1234 */
1235 Assert(!dontWait);
1236 DeadLockReport();
1237 /* DeadLockReport() will not return */
1238 }
1239 }
1240 else
1241 LWLockRelease(partitionLock);
1242 Assert(waitResult == PROC_WAIT_STATUS_OK);
1243
1244 /* The lock was granted to us. Update the local lock entry accordingly */
1245 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1246 GrantLockLocal(locallock, owner);
1247
1248 /*
1249 * Lock state is fully up-to-date now; if we error out after this, no
1250 * special error cleanup is required.
1251 */
1252 FinishStrongLockAcquire();
1253
1254 /*
1255 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1256 * standby server.
1257 */
1258 if (log_lock)
1259 {
1260 /*
1261 * Decode the locktag back to the original values, to avoid sending
1262 * lots of empty bytes with every message. See lock.h to check how a
1263 * locktag is defined for LOCKTAG_RELATION
1264 */
1265 LogAccessExclusiveLock(locktag->locktag_field1,
1266 locktag->locktag_field2);
1267 }
1268
1269 return LOCKACQUIRE_OK;
1270}
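/*
 * Illustrative usage (not part of lock.c): a conditional acquisition, as
 * done by the ConditionalLock* wrappers in lmgr.c, passes dontWait = true
 * and checks for LOCKACQUIRE_NOT_AVAIL, roughly like
 *
 *     if (LockAcquireExtended(&tag, ExclusiveLock, false, true,
 *                             true, NULL, false) == LOCKACQUIRE_NOT_AVAIL)
 *         return false;    // a conflicting lock is held; caller moves on
 */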
1271
1272/*
1273 * Find or create LOCK and PROCLOCK objects as needed for a new lock
1274 * request.
1275 *
1276 * Returns the PROCLOCK object, or NULL if we failed to create the objects
1277 * for lack of shared memory.
1278 *
1279 * The appropriate partition lock must be held at entry, and will be
1280 * held at exit.
1281 */
1282static PROCLOCK *
1283SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1284 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1285{
1286 LOCK *lock;
1287 PROCLOCK *proclock;
1288 PROCLOCKTAG proclocktag;
1289 uint32 proclock_hashcode;
1290 bool found;
1291
1292 /*
1293 * Find or create a lock with this tag.
1294 */
1295 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1296 locktag,
1297 hashcode,
1298 HASH_ENTER_NULL,
1299 &found);
1300 if (!lock)
1301 return NULL;
1302
1303 /*
1304 * if it's a new lock object, initialize it
1305 */
1306 if (!found)
1307 {
1308 lock->grantMask = 0;
1309 lock->waitMask = 0;
1310 dlist_init(&lock->procLocks);
1311 dclist_init(&lock->waitProcs);
1312 lock->nRequested = 0;
1313 lock->nGranted = 0;
1314 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1315 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1316 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1317 }
1318 else
1319 {
1320 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1321 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1322 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1323 Assert(lock->nGranted <= lock->nRequested);
1324 }
1325
1326 /*
1327 * Create the hash key for the proclock table.
1328 */
1329 proclocktag.myLock = lock;
1330 proclocktag.myProc = proc;
1331
1332 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1333
1334 /*
1335 * Find or create a proclock entry with this tag
1336 */
1337 proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1338 &proclocktag,
1339 proclock_hashcode,
1340 HASH_ENTER_NULL,
1341 &found);
1342 if (!proclock)
1343 {
1344 /* Oops, not enough shmem for the proclock */
1345 if (lock->nRequested == 0)
1346 {
1347 /*
1348 * There are no other requestors of this lock, so garbage-collect
1349 * the lock object. We *must* do this to avoid a permanent leak
1350 * of shared memory, because there won't be anything to cause
1351 * anyone to release the lock object later.
1352 */
1353 Assert(dlist_is_empty(&(lock->procLocks)));
1354 if (!hash_search_with_hash_value(LockMethodLockHash,
1355 &(lock->tag),
1356 hashcode,
1357 HASH_REMOVE,
1358 NULL))
1359 elog(PANIC, "lock table corrupted");
1360 }
1361 return NULL;
1362 }
1363
1364 /*
1365 * If new, initialize the new entry
1366 */
1367 if (!found)
1368 {
1369 uint32 partition = LockHashPartition(hashcode);
1370
1371 /*
1372 * It might seem unsafe to access proclock->groupLeader without a
1373 * lock, but it's not really. Either we are initializing a proclock
1374 * on our own behalf, in which case our group leader isn't changing
1375 * because the group leader for a process can only ever be changed by
1376 * the process itself; or else we are transferring a fast-path lock to
1377 * the main lock table, in which case that process can't change its
1378 * lock group leader without first releasing all of its locks (and in
1379 * particular the one we are currently transferring).
1380 */
1381 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1382 proc->lockGroupLeader : proc;
1383 proclock->holdMask = 0;
1384 proclock->releaseMask = 0;
1385 /* Add proclock to appropriate lists */
1386 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1387 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1388 PROCLOCK_PRINT("LockAcquire: new", proclock);
1389 }
1390 else
1391 {
1392 PROCLOCK_PRINT("LockAcquire: found", proclock);
1393 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1394
1395#ifdef CHECK_DEADLOCK_RISK
1396
1397 /*
1398 * Issue warning if we already hold a lower-level lock on this object
1399 * and do not hold a lock of the requested level or higher. This
1400 * indicates a deadlock-prone coding practice (eg, we'd have a
1401 * deadlock if another backend were following the same code path at
1402 * about the same time).
1403 *
1404 * This is not enabled by default, because it may generate log entries
1405 * about user-level coding practices that are in fact safe in context.
1406 * It can be enabled to help find system-level problems.
1407 *
1408 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1409 * better to use a table. For now, though, this works.
1410 */
1411 {
1412 int i;
1413
1414 for (i = lockMethodTable->numLockModes; i > 0; i--)
1415 {
1416 if (proclock->holdMask & LOCKBIT_ON(i))
1417 {
1418 if (i >= (int) lockmode)
1419 break; /* safe: we have a lock >= req level */
1420 elog(LOG, "deadlock risk: raising lock level"
1421 " from %s to %s on object %u/%u/%u",
1422 lockMethodTable->lockModeNames[i],
1423 lockMethodTable->lockModeNames[lockmode],
1424 lock->tag.locktag_field1, lock->tag.locktag_field2,
1425 lock->tag.locktag_field3);
1426 break;
1427 }
1428 }
1429 }
1430#endif /* CHECK_DEADLOCK_RISK */
1431 }
1432
1433 /*
1434 * lock->nRequested and lock->requested[] count the total number of
1435 * requests, whether granted or waiting, so increment those immediately.
1436 * The other counts don't increment till we get the lock.
1437 */
1438 lock->nRequested++;
1439 lock->requested[lockmode]++;
1440 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1441
1442 /*
1443 * We shouldn't already hold the desired lock; else locallock table is
1444 * broken.
1445 */
1446 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1447 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1448 lockMethodTable->lockModeNames[lockmode],
1449 lock->tag.locktag_field1, lock->tag.locktag_field2,
1450 lock->tag.locktag_field3);
1451
1452 return proclock;
1453}
1454
1455/*
1456 * Check and set/reset the flag that we hold the relation extension lock.
1457 *
1458 * It is the caller's responsibility that this function is called after
1459 * acquiring/releasing the relation extension lock.
1460 *
1461 * Pass acquired as true if lock is acquired, false otherwise.
1462 */
1463static inline void
1464CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1465{
1466#ifdef USE_ASSERT_CHECKING
1467 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1468 IsRelationExtensionLockHeld = acquired;
1469#endif
1470}
1471
1472/*
1473 * Subroutine to free a locallock entry
1474 */
1475static void
1476RemoveLocalLock(LOCALLOCK *locallock)
1477{
1478 int i;
1479
1480 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1481 {
1482 if (locallock->lockOwners[i].owner != NULL)
1483 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1484 }
1485 locallock->numLockOwners = 0;
1486 if (locallock->lockOwners != NULL)
1487 pfree(locallock->lockOwners);
1488 locallock->lockOwners = NULL;
1489
1490 if (locallock->holdsStrongLockCount)
1491 {
1492 uint32 fasthashcode;
1493
1494 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1495
1496 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1497 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1498 FastPathStrongRelationLocks->count[fasthashcode]--;
1499 locallock->holdsStrongLockCount = false;
1500 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1501 }
1502
1503 if (!hash_search(LockMethodLocalHash,
1504 &(locallock->tag),
1505 HASH_REMOVE, NULL))
1506 elog(WARNING, "locallock table corrupted");
1507
1508 /*
1509 * Indicate that the lock is released for certain types of locks
1510 */
1511 CheckAndSetLockHeld(locallock, false);
1512}
1513
1514/*
1515 * LockCheckConflicts -- test whether requested lock conflicts
1516 * with those already granted
1517 *
1518 * Returns true if conflict, false if no conflict.
1519 *
1520 * NOTES:
1521 * Here's what makes this complicated: one process's locks don't
1522 * conflict with one another, no matter what purpose they are held for
1523 * (eg, session and transaction locks do not conflict). Nor do the locks
1524 * of one process in a lock group conflict with those of another process in
1525 * the same group. So, we must subtract off these locks when determining
1526 * whether the requested new lock conflicts with those already held.
1527 */
1528bool
1529LockCheckConflicts(LockMethod lockMethodTable,
1530 LOCKMODE lockmode,
1531 LOCK *lock,
1532 PROCLOCK *proclock)
1533{
1534 int numLockModes = lockMethodTable->numLockModes;
1535 LOCKMASK myLocks;
1536 int conflictMask = lockMethodTable->conflictTab[lockmode];
1537 int conflictsRemaining[MAX_LOCKMODES];
1538 int totalConflictsRemaining = 0;
1539 dlist_iter proclock_iter;
1540 int i;
1541
1542 /*
1543 * first check for global conflicts: If no locks conflict with my request,
1544 * then I get the lock.
1545 *
1546 * Checking for conflict: lock->grantMask represents the types of
1547 * currently held locks. conflictTable[lockmode] has a bit set for each
1548 * type of lock that conflicts with request. Bitwise compare tells if
1549 * there is a conflict.
1550 */
1551 if (!(conflictMask & lock->grantMask))
1552 {
1553 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1554 return false;
1555 }
1556
1557 /*
1558 * Rats. Something conflicts. But it could still be my own lock, or a
1559 * lock held by another member of my locking group. First, figure out how
1560 * many conflicts remain after subtracting out any locks I hold myself.
1561 */
1562 myLocks = proclock->holdMask;
1563 for (i = 1; i <= numLockModes; i++)
1564 {
1565 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1566 {
1567 conflictsRemaining[i] = 0;
1568 continue;
1569 }
1570 conflictsRemaining[i] = lock->granted[i];
1571 if (myLocks & LOCKBIT_ON(i))
1572 --conflictsRemaining[i];
1573 totalConflictsRemaining += conflictsRemaining[i];
1574 }
1575
1576 /* If no conflicts remain, we get the lock. */
1577 if (totalConflictsRemaining == 0)
1578 {
1579 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1580 return false;
1581 }
1582
1583 /* If no group locking, it's definitely a conflict. */
1584 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1585 {
1586 Assert(proclock->tag.myProc == MyProc);
1587 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1588 proclock);
1589 return true;
1590 }
1591
1592 /*
1593 * The relation extension lock conflicts even between members of the same lock group.
1594 */
1595 if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1596 {
1597 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1598 proclock);
1599 return true;
1600 }
1601
1602 /*
1603 * Locks held in conflicting modes by members of our own lock group are
1604 * not real conflicts; we can subtract those out and see if we still have
1605 * a conflict. This is O(N) in the number of processes holding or
1606 * awaiting locks on this object. We could improve that by making the
1607 * shared memory state more complex (and larger) but it doesn't seem worth
1608 * it.
1609 */
1610 dlist_foreach(proclock_iter, &lock->procLocks)
1611 {
1612 PROCLOCK *otherproclock =
1613 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1614
1615 if (proclock != otherproclock &&
1616 proclock->groupLeader == otherproclock->groupLeader &&
1617 (otherproclock->holdMask & conflictMask) != 0)
1618 {
1619 int intersectMask = otherproclock->holdMask & conflictMask;
1620
1621 for (i = 1; i <= numLockModes; i++)
1622 {
1623 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1624 {
1625 if (conflictsRemaining[i] <= 0)
1626 elog(PANIC, "proclocks held do not match lock");
1627 conflictsRemaining[i]--;
1628 totalConflictsRemaining--;
1629 }
1630 }
1631
1632 if (totalConflictsRemaining == 0)
1633 {
1634 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1635 proclock);
1636 return false;
1637 }
1638 }
1639 }
1640
1641 /* Nope, it's a real conflict. */
1642 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1643 return true;
1644}
1645
1646/*
1647 * GrantLock -- update the lock and proclock data structures to show
1648 * the lock request has been granted.
1649 *
1650 * NOTE: if proc was blocked, it also needs to be removed from the wait list
1651 * and have its waitLock/waitProcLock fields cleared. That's not done here.
1652 *
1653 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1654 * table entry; but since we may be awaking some other process, we can't do
1655 * that here; it's done by GrantLockLocal, instead.
1656 */
1657void
1658GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1659{
1660 lock->nGranted++;
1661 lock->granted[lockmode]++;
1662 lock->grantMask |= LOCKBIT_ON(lockmode);
1663 if (lock->granted[lockmode] == lock->requested[lockmode])
1664 lock->waitMask &= LOCKBIT_OFF(lockmode);
1665 proclock->holdMask |= LOCKBIT_ON(lockmode);
1666 LOCK_PRINT("GrantLock", lock, lockmode);
1667 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1668 Assert(lock->nGranted <= lock->nRequested);
1669}
1670
1671/*
1672 * UnGrantLock -- opposite of GrantLock.
1673 *
1674 * Updates the lock and proclock data structures to show that the lock
1675 * is no longer held nor requested by the current holder.
1676 *
1677 * Returns true if there were any waiters waiting on the lock that
1678 * should now be woken up with ProcLockWakeup.
1679 */
1680static bool
1681UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1682 PROCLOCK *proclock, LockMethod lockMethodTable)
1683{
1684 bool wakeupNeeded = false;
1685
1686 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1687 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1688 Assert(lock->nGranted <= lock->nRequested);
1689
1690 /*
1691 * fix the general lock stats
1692 */
1693 lock->nRequested--;
1694 lock->requested[lockmode]--;
1695 lock->nGranted--;
1696 lock->granted[lockmode]--;
1697
1698 if (lock->granted[lockmode] == 0)
1699 {
1700 /* change the conflict mask. No more of this lock type. */
1701 lock->grantMask &= LOCKBIT_OFF(lockmode);
1702 }
1703
1704 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1705
1706 /*
1707 * We need only run ProcLockWakeup if the released lock conflicts with at
1708 * least one of the lock types requested by waiter(s). Otherwise whatever
1709 * conflict made them wait must still exist. NOTE: before MVCC, we could
1710 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1711 * not true anymore, because the remaining granted locks might belong to
1712 * some waiter, who could now be awakened because he doesn't conflict with
1713 * his own locks.
1714 */
1715 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1716 wakeupNeeded = true;
1717
1718 /*
1719 * Now fix the per-proclock state.
1720 */
1721 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1722 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1723
1724 return wakeupNeeded;
1725}
1726
1727/*
1728 * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1729 * proclock and lock objects if possible, and call ProcLockWakeup if there
1730 * are remaining requests and the caller says it's OK. (Normally, this
1731 * should be called after UnGrantLock, and wakeupNeeded is the result from
1732 * UnGrantLock.)
1733 *
1734 * The appropriate partition lock must be held at entry, and will be
1735 * held at exit.
1736 */
1737static void
1738CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1739 LockMethod lockMethodTable, uint32 hashcode,
1740 bool wakeupNeeded)
1741{
1742 /*
1743 * If this was my last hold on this lock, delete my entry in the proclock
1744 * table.
1745 */
1746 if (proclock->holdMask == 0)
1747 {
1748 uint32 proclock_hashcode;
1749
1750 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1751 dlist_delete(&proclock->lockLink);
1752 dlist_delete(&proclock->procLink);
1753 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1754 if (!hash_search_with_hash_value(LockMethodProcLockHash,
1755 &(proclock->tag),
1756 proclock_hashcode,
1757 HASH_REMOVE,
1758 NULL))
1759 elog(PANIC, "proclock table corrupted");
1760 }
1761
1762 if (lock->nRequested == 0)
1763 {
1764 /*
1765 * The caller just released the last lock, so garbage-collect the lock
1766 * object.
1767 */
1768 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1769 Assert(dlist_is_empty(&lock->procLocks));
1770 if (!hash_search_with_hash_value(LockMethodLockHash,
1771 &(lock->tag),
1772 hashcode,
1773 HASH_REMOVE,
1774 NULL))
1775 elog(PANIC, "lock table corrupted");
1776 }
1777 else if (wakeupNeeded)
1778 {
1779 /* There are waiters on this lock, so wake them up. */
1780 ProcLockWakeup(lockMethodTable, lock);
1781 }
1782}
1783
1784/*
1785 * GrantLockLocal -- update the locallock data structures to show
1786 * the lock request has been granted.
1787 *
1788 * We expect that LockAcquire made sure there is room to add a new
1789 * ResourceOwner entry.
1790 */
1791static void
1792GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1793{
1794 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1795 int i;
1796
1797 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1798 /* Count the total */
1799 locallock->nLocks++;
1800 /* Count the per-owner lock */
1801 for (i = 0; i < locallock->numLockOwners; i++)
1802 {
1803 if (lockOwners[i].owner == owner)
1804 {
1805 lockOwners[i].nLocks++;
1806 return;
1807 }
1808 }
1809 lockOwners[i].owner = owner;
1810 lockOwners[i].nLocks = 1;
1811 locallock->numLockOwners++;
1812 if (owner != NULL)
1813 ResourceOwnerRememberLock(owner, locallock);
1814
1815 /* Indicate that the lock is acquired for certain types of locks. */
1816 CheckAndSetLockHeld(locallock, true);
1817}
1818
1819/*
1820 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1821 * and arrange for error cleanup if it fails
1822 */
1823static void
1824BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1825{
1826 Assert(StrongLockInProgress == NULL);
1827 Assert(locallock->holdsStrongLockCount == false);
1828
1829 /*
1830 * Adding to a memory location is not atomic, so we take a spinlock to
1831 * ensure we don't collide with someone else trying to bump the count at
1832 * the same time.
1833 *
1834 * XXX: It might be worth considering using an atomic fetch-and-add
1835 * instruction here, on architectures where that is supported.
1836 */
1837
1838 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1839 FastPathStrongRelationLocks->count[fasthashcode]++;
1840 locallock->holdsStrongLockCount = true;
1841 StrongLockInProgress = locallock;
1842 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1843}
1844
1845/*
1846 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1847 * acquisition once it's no longer needed
1848 */
1849static void
1850FinishStrongLockAcquire(void)
1851{
1852 StrongLockInProgress = NULL;
1853}
1854
1855/*
1856 * AbortStrongLockAcquire - undo strong lock state changes performed by
1857 * BeginStrongLockAcquire.
1858 */
1859void
1860AbortStrongLockAcquire(void)
1861{
1862 uint32 fasthashcode;
1863 LOCALLOCK *locallock = StrongLockInProgress;
1864
1865 if (locallock == NULL)
1866 return;
1867
1868 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1869 Assert(locallock->holdsStrongLockCount == true);
1870 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1871 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1872 FastPathStrongRelationLocks->count[fasthashcode]--;
1873 locallock->holdsStrongLockCount = false;
1874 StrongLockInProgress = NULL;
1875 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1876}
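/*
 * Illustrative sketch (editor's note, not part of the original source): a
 * strong-lock acquisition brackets the slow-path setup with these helpers so
 * that the FastPathStrongRelationLocks counter cannot leak on error:
 *
 *		BeginStrongLockAcquire(locallock, fasthashcode);
 *		if (!FastPathTransferRelationLocks(lockMethodTable, locktag, hashcode))
 *			... report out of shared memory; AbortStrongLockAcquire() runs
 *			    from the error cleanup path ...
 *		... set up the LOCK/PROCLOCK in the main hash table ...
 *		FinishStrongLockAcquire();		// keep the count; the lock is now held
 */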
1877
1878/*
1879 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1880 * WaitOnLock on.
1881 *
1882 * proc.c needs this for the case where we are booted off the lock by
1883 * timeout, but discover that someone granted us the lock anyway.
1884 *
1885 * We could just export GrantLockLocal, but that would require including
1886 * resowner.h in lock.h, which creates circularity.
1887 */
1888void
1889GrantAwaitedLock(void)
1890{
1891 GrantLockLocal(awaitedLock, awaitedOwner);
1892}
1893
1894/*
1895 * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1896 */
1897LOCALLOCK *
1898GetAwaitedLock(void)
1899{
1900 return awaitedLock;
1901}
1902
1903/*
1904 * ResetAwaitedLock -- Forget that we are waiting on a lock.
1905 */
1906void
1907ResetAwaitedLock(void)
1908{
1909 awaitedLock = NULL;
1910}
1911
1912/*
1913 * MarkLockClear -- mark an acquired lock as "clear"
1914 *
1915 * This means that we know we have absorbed all sinval messages that other
1916 * sessions generated before we acquired this lock, and so we can confidently
1917 * assume we know about any catalog changes protected by this lock.
1918 */
1919void
1920MarkLockClear(LOCALLOCK *locallock)
1921{
1922 Assert(locallock->nLocks > 0);
1923 locallock->lockCleared = true;
1924}
1925
1926/*
1927 * WaitOnLock -- wait to acquire a lock
1928 *
1929 * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1930 */
1931static ProcWaitStatus
1932WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1933{
1934 ProcWaitStatus result;
1935 ErrorContextCallback waiterrcontext;
1936
1937 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1938 locallock->tag.lock.locktag_field2,
1939 locallock->tag.lock.locktag_field3,
1940 locallock->tag.lock.locktag_field4,
1941 locallock->tag.lock.locktag_type,
1942 locallock->tag.mode);
1943
1944 /* Setup error traceback support for ereport() */
1945 waiterrcontext.callback = waitonlock_error_callback;
1946 waiterrcontext.arg = (void *) locallock;
1947 waiterrcontext.previous = error_context_stack;
1948 error_context_stack = &waiterrcontext;
1949
1950 /* adjust the process title to indicate that it's waiting */
1951 set_ps_display_suffix("waiting");
1952
1953 /*
1954 * Record the fact that we are waiting for a lock, so that
1955 * LockErrorCleanup will clean up if cancel/die happens.
1956 */
1957 awaitedLock = locallock;
1958 awaitedOwner = owner;
1959
1960 /*
1961 * NOTE: Think not to put any shared-state cleanup after the call to
1962 * ProcSleep, in either the normal or failure path. The lock state must
1963 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1964 * waiting for the lock. This is necessary because of the possibility
1965 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1966 * grants us the lock, but before we've noticed it. Hence, after granting,
1967 * the locktable state must fully reflect the fact that we own the lock;
1968 * we can't do additional work on return.
1969 *
1970 * We can and do use a PG_TRY block to try to clean up after failure, but
1971 * this still has a major limitation: elog(FATAL) can occur while waiting
1972 * (eg, a "die" interrupt), and then control won't come back here. So all
1973 * cleanup of essential state should happen in LockErrorCleanup, not here.
1974 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1975 * is unimportant if the process exits.
1976 */
1977 PG_TRY();
1978 {
1979 result = ProcSleep(locallock);
1980 }
1981 PG_CATCH();
1982 {
1983 /* In this path, awaitedLock remains set until LockErrorCleanup */
1984
1985 /* reset ps display to remove the suffix */
1986 set_ps_display_remove_suffix();
1987
1988 /* and propagate the error */
1989 PG_RE_THROW();
1990 }
1991 PG_END_TRY();
1992
1993 /*
1994 * We no longer want LockErrorCleanup to do anything.
1995 */
1996 awaitedLock = NULL;
1997
1998 /* reset ps display to remove the suffix */
1999 set_ps_display_remove_suffix();
2000
2001 error_context_stack = waiterrcontext.previous;
2002
2003 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2004 locallock->tag.lock.locktag_field2,
2005 locallock->tag.lock.locktag_field3,
2006 locallock->tag.lock.locktag_field4,
2007 locallock->tag.lock.locktag_type,
2008 locallock->tag.mode);
2009
2010 return result;
2011}
2012
2013/*
2014 * error context callback for failures in WaitOnLock
2015 *
2016 * We report which lock was being waited on, in the same style used in
2017 * deadlock reports. This helps with lock timeout errors in particular.
2018 */
2019static void
2020waitonlock_error_callback(void *arg)
2021{
2022 LOCALLOCK *locallock = (LOCALLOCK *) arg;
2023 const LOCKTAG *tag = &locallock->tag.lock;
2024 LOCKMODE mode = locallock->tag.mode;
2025 StringInfoData locktagbuf;
2026
2027 initStringInfo(&locktagbuf);
2028 DescribeLockTag(&locktagbuf, tag);
2029
2030 errcontext("waiting for %s on %s",
2031 GetLockmodeName(tag->locktag_lockmethodid, mode),
2032 locktagbuf.data);
2033}
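/*
 * Illustrative sketch (editor's note, not part of the original source): with
 * this callback installed, a lock_timeout failure on a relation lock carries
 * a context line in the same style as deadlock reports, roughly:
 *
 *		ERROR:  canceling statement due to lock timeout
 *		CONTEXT:  waiting for ShareLock on relation 16384 of database 16385
 *
 * (The exact lock mode and identifiers depend on the blocked request.)
 */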
2034
2035/*
2036 * Remove a proc from the wait-queue it is on (caller must know it is on one).
2037 * This is only used when the proc has failed to get the lock, so we set its
2038 * waitStatus to PROC_WAIT_STATUS_ERROR.
2039 *
2040 * Appropriate partition lock must be held by caller. Also, caller is
2041 * responsible for signaling the proc if needed.
2042 *
2043 * NB: this does not clean up any locallock object that may exist for the lock.
2044 */
2045void
2046RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
2047{
2048 LOCK *waitLock = proc->waitLock;
2049 PROCLOCK *proclock = proc->waitProcLock;
2050 LOCKMODE lockmode = proc->waitLockMode;
2051 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
2052
2053 /* Make sure proc is waiting */
2054 Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
2055 Assert(proc->links.next != NULL);
2056 Assert(waitLock);
2057 Assert(!dclist_is_empty(&waitLock->waitProcs));
2058 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
2059
2060 /* Remove proc from lock's wait queue */
2061 dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
2062
2063 /* Undo increments of request counts by waiting process */
2064 Assert(waitLock->nRequested > 0);
2065 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2066 waitLock->nRequested--;
2067 Assert(waitLock->requested[lockmode] > 0);
2068 waitLock->requested[lockmode]--;
2069 /* don't forget to clear waitMask bit if appropriate */
2070 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2071 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2072
2073 /* Clean up the proc's own state, and pass it the ok/fail signal */
2074 proc->waitLock = NULL;
2075 proc->waitProcLock = NULL;
2076 proc->waitStatus = PROC_WAIT_STATUS_ERROR;
2077
2078 /*
2079 * Delete the proclock immediately if it represents no already-held locks.
2080 * (This must happen now because if the owner of the lock decides to
2081 * release it, and the requested/granted counts then go to zero,
2082 * LockRelease expects there to be no remaining proclocks.) Then see if
2083 * any other waiters for the lock can be woken up now.
2084 */
2085 CleanUpLock(waitLock, proclock,
2086 LockMethods[lockmethodid], hashcode,
2087 true);
2088}
2089
2090/*
2091 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2092 * Release a session lock if 'sessionLock' is true, else release a
2093 * regular transaction lock.
2094 *
2095 * Side Effects: find any waiting processes that are now wakable,
2096 * grant them their requested locks and awaken them.
2097 * (We have to grant the lock here to avoid a race between
2098 * the waking process and any new process to
2099 * come along and request the lock.)
2100 */
2101bool
2102LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2103{
2104 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2105 LockMethod lockMethodTable;
2106 LOCALLOCKTAG localtag;
2107 LOCALLOCK *locallock;
2108 LOCK *lock;
2109 PROCLOCK *proclock;
2110 LWLock *partitionLock;
2111 bool wakeupNeeded;
2112
2113 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2114 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2115 lockMethodTable = LockMethods[lockmethodid];
2116 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2117 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2118
2119#ifdef LOCK_DEBUG
2120 if (LOCK_DEBUG_ENABLED(locktag))
2121 elog(LOG, "LockRelease: lock [%u,%u] %s",
2122 locktag->locktag_field1, locktag->locktag_field2,
2123 lockMethodTable->lockModeNames[lockmode]);
2124#endif
2125
2126 /*
2127 * Find the LOCALLOCK entry for this lock and lockmode
2128 */
2129 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2130 localtag.lock = *locktag;
2131 localtag.mode = lockmode;
2132
2133 locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2134 &localtag,
2135 HASH_FIND, NULL);
2136
2137 /*
2138 * let the caller print its own error message, too. Do not ereport(ERROR).
2139 */
2140 if (!locallock || locallock->nLocks <= 0)
2141 {
2142 elog(WARNING, "you don't own a lock of type %s",
2143 lockMethodTable->lockModeNames[lockmode]);
2144 return false;
2145 }
2146
2147 /*
2148 * Decrease the count for the resource owner.
2149 */
2150 {
2151 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2152 ResourceOwner owner;
2153 int i;
2154
2155 /* Identify owner for lock */
2156 if (sessionLock)
2157 owner = NULL;
2158 else
2159 owner = CurrentResourceOwner;
2160
2161 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2162 {
2163 if (lockOwners[i].owner == owner)
2164 {
2165 Assert(lockOwners[i].nLocks > 0);
2166 if (--lockOwners[i].nLocks == 0)
2167 {
2168 if (owner != NULL)
2169 ResourceOwnerForgetLock(owner, locallock);
2170 /* compact out unused slot */
2171 locallock->numLockOwners--;
2172 if (i < locallock->numLockOwners)
2173 lockOwners[i] = lockOwners[locallock->numLockOwners];
2174 }
2175 break;
2176 }
2177 }
2178 if (i < 0)
2179 {
2180 /* don't release a lock belonging to another owner */
2181 elog(WARNING, "you don't own a lock of type %s",
2182 lockMethodTable->lockModeNames[lockmode]);
2183 return false;
2184 }
2185 }
2186
2187 /*
2188 * Decrease the total local count. If we're still holding the lock, we're
2189 * done.
2190 */
2191 locallock->nLocks--;
2192
2193 if (locallock->nLocks > 0)
2194 return true;
2195
2196 /*
2197 * At this point we can no longer suppose we are clear of invalidation
2198 * messages related to this lock. Although we'll delete the LOCALLOCK
2199 * object before any intentional return from this routine, it seems worth
2200 * the trouble to explicitly reset lockCleared right now, just in case
2201 * some error prevents us from deleting the LOCALLOCK.
2202 */
2203 locallock->lockCleared = false;
2204
2205 /* Attempt fast release of any lock eligible for the fast path. */
2206 if (EligibleForRelationFastPath(locktag, lockmode) &&
2207 FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
2208 {
2209 bool released;
2210
2211 /*
2212 * We might not find the lock here, even if we originally entered it
2213 * here. Another backend may have moved it to the main table.
2214 */
2215 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2216 released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2217 lockmode);
2218 LWLockRelease(&MyProc->fpInfoLock);
2219 if (released)
2220 {
2221 RemoveLocalLock(locallock);
2222 return true;
2223 }
2224 }
2225
2226 /*
2227 * Otherwise we've got to mess with the shared lock table.
2228 */
2229 partitionLock = LockHashPartitionLock(locallock->hashcode);
2230
2231 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2232
2233 /*
2234 * Normally, we don't need to re-find the lock or proclock, since we kept
2235 * their addresses in the locallock table, and they couldn't have been
2236 * removed while we were holding a lock on them. But it's possible that
2237 * the lock was taken fast-path and has since been moved to the main hash
2238 * table by another backend, in which case we will need to look up the
2239 * objects here. We assume the lock field is NULL if so.
2240 */
2241 lock = locallock->lock;
2242 if (!lock)
2243 {
2244 PROCLOCKTAG proclocktag;
2245
2246 Assert(EligibleForRelationFastPath(locktag, lockmode));
2247 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2248 locktag,
2249 locallock->hashcode,
2250 HASH_FIND,
2251 NULL);
2252 if (!lock)
2253 elog(ERROR, "failed to re-find shared lock object");
2254 locallock->lock = lock;
2255
2256 proclocktag.myLock = lock;
2257 proclocktag.myProc = MyProc;
2258 locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2259 &proclocktag,
2260 HASH_FIND,
2261 NULL);
2262 if (!locallock->proclock)
2263 elog(ERROR, "failed to re-find shared proclock object");
2264 }
2265 LOCK_PRINT("LockRelease: found", lock, lockmode);
2266 proclock = locallock->proclock;
2267 PROCLOCK_PRINT("LockRelease: found", proclock);
2268
2269 /*
2270 * Double-check that we are actually holding a lock of the type we want to
2271 * release.
2272 */
2273 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2274 {
2275 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2276 LWLockRelease(partitionLock);
2277 elog(WARNING, "you don't own a lock of type %s",
2278 lockMethodTable->lockModeNames[lockmode]);
2279 RemoveLocalLock(locallock);
2280 return false;
2281 }
2282
2283 /*
2284 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2285 */
2286 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2287
2288 CleanUpLock(lock, proclock,
2289 lockMethodTable, locallock->hashcode,
2290 wakeupNeeded);
2291
2292 LWLockRelease(partitionLock);
2293
2294 RemoveLocalLock(locallock);
2295 return true;
2296}
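/*
 * Illustrative sketch (editor's note, not part of the original source): most
 * callers go through lmgr.c rather than calling LockRelease directly. A
 * relation-level unlock essentially does:
 *
 *		LOCKTAG		tag;
 *
 *		SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 *		if (!LockRelease(&tag, AccessShareLock, false))
 *			... WARNING was already emitted: we didn't hold that lock ...
 *
 * The false argument releases a transaction-level (not session-level) hold.
 */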
2297
2298/*
2299 * LockReleaseAll -- Release all locks of the specified lock method that
2300 * are held by the current process.
2301 *
2302 * Well, not necessarily *all* locks. The available behaviors are:
2303 * allLocks == true: release all locks including session locks.
2304 * allLocks == false: release all non-session locks.
2305 */
2306void
2307LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2308{
2309 HASH_SEQ_STATUS status;
2310 LockMethod lockMethodTable;
2311 int i,
2312 numLockModes;
2313 LOCALLOCK *locallock;
2314 LOCK *lock;
2315 int partition;
2316 bool have_fast_path_lwlock = false;
2317
2318 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2319 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2320 lockMethodTable = LockMethods[lockmethodid];
2321
2322#ifdef LOCK_DEBUG
2323 if (*(lockMethodTable->trace_flag))
2324 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2325#endif
2326
2327 /*
2328 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2329 * the only way that the lock we hold on our own VXID can ever get
2330 * released: it is always and only released when a toplevel transaction
2331 * ends.
2332 */
2333 if (lockmethodid == DEFAULT_LOCKMETHOD)
2334 VirtualXactLockTableCleanup();
2335
2336 numLockModes = lockMethodTable->numLockModes;
2337
2338 /*
2339 * First we run through the locallock table and get rid of unwanted
2340 * entries, then we scan the process's proclocks and get rid of those. We
2341 * do this separately because we may have multiple locallock entries
2342 * pointing to the same proclock, and we daren't end up with any dangling
2343 * pointers. Fast-path locks are cleaned up during the locallock table
2344 * scan, though.
2345 */
2346 hash_seq_init(&status, LockMethodLocalHash);
2347
2348 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2349 {
2350 /*
2351 * If the LOCALLOCK entry is unused, something must've gone wrong
2352 * while trying to acquire this lock. Just forget the local entry.
2353 */
2354 if (locallock->nLocks == 0)
2355 {
2356 RemoveLocalLock(locallock);
2357 continue;
2358 }
2359
2360 /* Ignore items that are not of the lockmethod to be removed */
2361 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2362 continue;
2363
2364 /*
2365 * If we are asked to release all locks, we can just zap the entry.
2366 * Otherwise, must scan to see if there are session locks. We assume
2367 * there is at most one lockOwners entry for session locks.
2368 */
2369 if (!allLocks)
2370 {
2371 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2372
2373 /* If session lock is above array position 0, move it down to 0 */
2374 for (i = 0; i < locallock->numLockOwners; i++)
2375 {
2376 if (lockOwners[i].owner == NULL)
2377 lockOwners[0] = lockOwners[i];
2378 else
2379 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2380 }
2381
2382 if (locallock->numLockOwners > 0 &&
2383 lockOwners[0].owner == NULL &&
2384 lockOwners[0].nLocks > 0)
2385 {
2386 /* Fix the locallock to show just the session locks */
2387 locallock->nLocks = lockOwners[0].nLocks;
2388 locallock->numLockOwners = 1;
2389 /* We aren't deleting this locallock, so done */
2390 continue;
2391 }
2392 else
2393 locallock->numLockOwners = 0;
2394 }
2395
2396#ifdef USE_ASSERT_CHECKING
2397
2398 /*
2399 * Tuple locks are currently held only for short durations within a
2400 * transaction. Check that we didn't forget to release one.
2401 */
2402 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
2403 elog(WARNING, "tuple lock held at commit");
2404#endif
2405
2406 /*
2407 * If the lock or proclock pointers are NULL, this lock was taken via
2408 * the relation fast-path (and is not known to have been transferred).
2409 */
2410 if (locallock->proclock == NULL || locallock->lock == NULL)
2411 {
2412 LOCKMODE lockmode = locallock->tag.mode;
2413 Oid relid;
2414
2415 /* Verify that a fast-path lock is what we've got. */
2416 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2417 elog(PANIC, "locallock table corrupted");
2418
2419 /*
2420 * If we don't currently hold the LWLock that protects our
2421 * fast-path data structures, we must acquire it before attempting
2422 * to release the lock via the fast-path. We will continue to
2423 * hold the LWLock until we're done scanning the locallock table,
2424 * unless we hit a transferred fast-path lock. (XXX is this
2425 * really such a good idea? There could be a lot of entries ...)
2426 */
2427 if (!have_fast_path_lwlock)
2428 {
2429 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2430 have_fast_path_lwlock = true;
2431 }
2432
2433 /* Attempt fast-path release. */
2434 relid = locallock->tag.lock.locktag_field2;
2435 if (FastPathUnGrantRelationLock(relid, lockmode))
2436 {
2437 RemoveLocalLock(locallock);
2438 continue;
2439 }
2440
2441 /*
2442 * Our lock, originally taken via the fast path, has been
2443 * transferred to the main lock table. That's going to require
2444 * some extra work, so release our fast-path lock before starting.
2445 */
2446 LWLockRelease(&MyProc->fpInfoLock);
2447 have_fast_path_lwlock = false;
2448
2449 /*
2450 * Now dump the lock. We haven't got a pointer to the LOCK or
2451 * PROCLOCK in this case, so we have to handle this a bit
2452 * differently than a normal lock release. Unfortunately, this
2453 * requires an extra LWLock acquire-and-release cycle on the
2454 * partitionLock, but hopefully it shouldn't happen often.
2455 */
2456 LockRefindAndRelease(lockMethodTable, MyProc,
2457 &locallock->tag.lock, lockmode, false);
2458 RemoveLocalLock(locallock);
2459 continue;
2460 }
2461
2462 /* Mark the proclock to show we need to release this lockmode */
2463 if (locallock->nLocks > 0)
2464 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2465
2466 /* And remove the locallock hashtable entry */
2467 RemoveLocalLock(locallock);
2468 }
2469
2470 /* Done with the fast-path data structures */
2471 if (have_fast_path_lwlock)
2472 LWLockRelease(&MyProc->fpInfoLock);
2473
2474 /*
2475 * Now, scan each lock partition separately.
2476 */
2477 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2478 {
2479 LWLock *partitionLock;
2480 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2481 dlist_mutable_iter proclock_iter;
2482
2483 partitionLock = LockHashPartitionLockByIndex(partition);
2484
2485 /*
2486 * If the proclock list for this partition is empty, we can skip
2487 * acquiring the partition lock. This optimization is trickier than
2488 * it looks, because another backend could be in process of adding
2489 * something to our proclock list due to promoting one of our
2490 * fast-path locks. However, any such lock must be one that we
2491 * decided not to delete above, so it's okay to skip it again now;
2492 * we'd just decide not to delete it again. We must, however, be
2493 * careful to re-fetch the list header once we've acquired the
2494 * partition lock, to be sure we have a valid, up-to-date pointer.
2495 * (There is probably no significant risk if pointer fetch/store is
2496 * atomic, but we don't wish to assume that.)
2497 *
2498 * XXX This argument assumes that the locallock table correctly
2499 * represents all of our fast-path locks. While allLocks mode
2500 * guarantees to clean up all of our normal locks regardless of the
2501 * locallock situation, we lose that guarantee for fast-path locks.
2502 * This is not ideal.
2503 */
2504 if (dlist_is_empty(procLocks))
2505 continue; /* needn't examine this partition */
2506
2507 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2508
2509 dlist_foreach_modify(proclock_iter, procLocks)
2510 {
2511 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2512 bool wakeupNeeded = false;
2513
2514 Assert(proclock->tag.myProc == MyProc);
2515
2516 lock = proclock->tag.myLock;
2517
2518 /* Ignore items that are not of the lockmethod to be removed */
2519 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2520 continue;
2521
2522 /*
2523 * In allLocks mode, force release of all locks even if locallock
2524 * table had problems
2525 */
2526 if (allLocks)
2527 proclock->releaseMask = proclock->holdMask;
2528 else
2529 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2530
2531 /*
2532 * Ignore items that have nothing to be released, unless they have
2533 * holdMask == 0 and are therefore recyclable
2534 */
2535 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2536 continue;
2537
2538 PROCLOCK_PRINT("LockReleaseAll", proclock);
2539 LOCK_PRINT("LockReleaseAll", lock, 0);
2540 Assert(lock->nRequested >= 0);
2541 Assert(lock->nGranted >= 0);
2542 Assert(lock->nGranted <= lock->nRequested);
2543 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2544
2545 /*
2546 * Release the previously-marked lock modes
2547 */
2548 for (i = 1; i <= numLockModes; i++)
2549 {
2550 if (proclock->releaseMask & LOCKBIT_ON(i))
2551 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2552 lockMethodTable);
2553 }
2554 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2555 Assert(lock->nGranted <= lock->nRequested);
2556 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2557
2558 proclock->releaseMask = 0;
2559
2560 /* CleanUpLock will wake up waiters if needed. */
2561 CleanUpLock(lock, proclock,
2562 lockMethodTable,
2563 LockTagHashCode(&lock->tag),
2564 wakeupNeeded);
2565 } /* loop over PROCLOCKs within this partition */
2566
2567 LWLockRelease(partitionLock);
2568 } /* loop over partitions */
2569
2570#ifdef LOCK_DEBUG
2571 if (*(lockMethodTable->trace_flag))
2572 elog(LOG, "LockReleaseAll done");
2573#endif
2574}
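/*
 * Illustrative sketch (editor's note, not part of the original source): at
 * the end of a top-level transaction, proc.c invokes this essentially as
 *
 *		LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);	// session locks too on abort
 *		LockReleaseAll(USER_LOCKMETHOD, false);			// transaction-level advisory locks
 *
 * so session-level locks (e.g. those taken by VACUUM or session advisory
 * locks) survive a commit and are only dropped explicitly, on abort, or at
 * backend exit.
 */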
2575
2576/*
2577 * LockReleaseSession -- Release all session locks of the specified lock method
2578 * that are held by the current process.
2579 */
2580void
2581LockReleaseSession(LOCKMETHODID lockmethodid)
2582{
2583 HASH_SEQ_STATUS status;
2584 LOCALLOCK *locallock;
2585
2586 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2587 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2588
2589 hash_seq_init(&status, LockMethodLocalHash);
2590
2591 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2592 {
2593 /* Ignore items that are not of the specified lock method */
2594 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2595 continue;
2596
2597 ReleaseLockIfHeld(locallock, true);
2598 }
2599}
2600
2601/*
2602 * LockReleaseCurrentOwner
2603 * Release all locks belonging to CurrentResourceOwner
2604 *
2605 * If the caller knows what those locks are, it can pass them as an array.
2606 * That speeds up the call significantly, when a lot of locks are held.
2607 * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2608 * table to find them.
2609 */
2610void
2611LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2612{
2613 if (locallocks == NULL)
2614 {
2615 HASH_SEQ_STATUS status;
2616 LOCALLOCK *locallock;
2617
2618 hash_seq_init(&status, LockMethodLocalHash);
2619
2620 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2621 ReleaseLockIfHeld(locallock, false);
2622 }
2623 else
2624 {
2625 int i;
2626
2627 for (i = nlocks - 1; i >= 0; i--)
2628 ReleaseLockIfHeld(locallocks[i], false);
2629 }
2630}
2631
2632/*
2633 * ReleaseLockIfHeld
2634 * Release any session-level locks on this lockable object if sessionLock
2635 * is true; else, release any locks held by CurrentResourceOwner.
2636 *
2637 * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2638 * locks), but without refactoring LockRelease() we cannot support releasing
2639 * locks belonging to resource owners other than CurrentResourceOwner.
2640 * If we were to refactor, it'd be a good idea to fix it so we don't have to
2641 * do a hashtable lookup of the locallock, too. However, currently this
2642 * function isn't used heavily enough to justify refactoring for its
2643 * convenience.
2644 */
2645static void
2646ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2647{
2648 ResourceOwner owner;
2649 LOCALLOCKOWNER *lockOwners;
2650 int i;
2651
2652 /* Identify owner for lock (must match LockRelease!) */
2653 if (sessionLock)
2654 owner = NULL;
2655 else
2656 owner = CurrentResourceOwner;
2657
2658 /* Scan to see if there are any locks belonging to the target owner */
2659 lockOwners = locallock->lockOwners;
2660 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2661 {
2662 if (lockOwners[i].owner == owner)
2663 {
2664 Assert(lockOwners[i].nLocks > 0);
2665 if (lockOwners[i].nLocks < locallock->nLocks)
2666 {
2667 /*
2668 * We will still hold this lock after forgetting this
2669 * ResourceOwner.
2670 */
2671 locallock->nLocks -= lockOwners[i].nLocks;
2672 /* compact out unused slot */
2673 locallock->numLockOwners--;
2674 if (owner != NULL)
2675 ResourceOwnerForgetLock(owner, locallock);
2676 if (i < locallock->numLockOwners)
2677 lockOwners[i] = lockOwners[locallock->numLockOwners];
2678 }
2679 else
2680 {
2681 Assert(lockOwners[i].nLocks == locallock->nLocks);
2682 /* We want to call LockRelease just once */
2683 lockOwners[i].nLocks = 1;
2684 locallock->nLocks = 1;
2685 if (!LockRelease(&locallock->tag.lock,
2686 locallock->tag.mode,
2687 sessionLock))
2688 elog(WARNING, "ReleaseLockIfHeld: failed??");
2689 }
2690 break;
2691 }
2692 }
2693}
2694
2695/*
2696 * LockReassignCurrentOwner
2697 * Reassign all locks belonging to CurrentResourceOwner to belong
2698 * to its parent resource owner.
2699 *
2700 * If the caller knows what those locks are, it can pass them as an array.
2701 * That speeds up the call significantly, when a lot of locks are held
2702 * (e.g. pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2703 * and we'll traverse through our hash table to find them.
2704 */
2705void
2706LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2707{
2708 ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2709
2710 Assert(parent != NULL);
2711
2712 if (locallocks == NULL)
2713 {
2714 HASH_SEQ_STATUS status;
2715 LOCALLOCK *locallock;
2716
2717 hash_seq_init(&status, LockMethodLocalHash);
2718
2719 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2720 LockReassignOwner(locallock, parent);
2721 }
2722 else
2723 {
2724 int i;
2725
2726 for (i = nlocks - 1; i >= 0; i--)
2727 LockReassignOwner(locallocks[i], parent);
2728 }
2729}
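/*
 * Illustrative sketch (editor's note, not part of the original source): this
 * is the lock half of subtransaction commit. resowner.c releases a
 * subtransaction's lock resources with the equivalent of
 *
 *		CurrentResourceOwner = child;
 *		LockReassignCurrentOwner(locallocks, nlocks);	// holds move to parent
 *
 * so the parent transaction keeps everything the child acquired, using only
 * locallock bookkeeping and no shared lock table access.
 */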
2730
2731/*
2732 * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2733 * CurrentResourceOwner to its parent.
2734 */
2735static void
2736LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2737{
2738 LOCALLOCKOWNER *lockOwners;
2739 int i;
2740 int ic = -1;
2741 int ip = -1;
2742
2743 /*
2744 * Scan to see if there are any locks belonging to current owner or its
2745 * parent
2746 */
2747 lockOwners = locallock->lockOwners;
2748 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2749 {
2750 if (lockOwners[i].owner == CurrentResourceOwner)
2751 ic = i;
2752 else if (lockOwners[i].owner == parent)
2753 ip = i;
2754 }
2755
2756 if (ic < 0)
2757 return; /* no current locks */
2758
2759 if (ip < 0)
2760 {
2761 /* Parent has no slot, so just give it the child's slot */
2762 lockOwners[ic].owner = parent;
2763 ResourceOwnerRememberLock(parent, locallock);
2764 }
2765 else
2766 {
2767 /* Merge child's count with parent's */
2768 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2769 /* compact out unused slot */
2770 locallock->numLockOwners--;
2771 if (ic < locallock->numLockOwners)
2772 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2773 }
2774 ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2775}
2776
2777/*
2778 * FastPathGrantRelationLock
2779 * Grant lock using per-backend fast-path array, if there is space.
2780 */
2781static bool
2782FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2783{
2784 uint32 i;
2785 uint32 unused_slot = FastPathLockSlotsPerBackend();
2786
2787 /* fast-path group the lock belongs to */
2788 uint32 group = FAST_PATH_REL_GROUP(relid);
2789
2790 /* Scan for existing entry for this relid, remembering empty slot. */
2791 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2792 {
2793 /* index into the whole per-backend array */
2794 uint32 f = FAST_PATH_SLOT(group, i);
2795
2796 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2797 unused_slot = f;
2798 else if (MyProc->fpRelId[f] == relid)
2799 {
2800 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2801 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2802 return true;
2803 }
2804 }
2805
2806 /* If no existing entry, use any empty slot. */
2807 if (unused_slot < FastPathLockSlotsPerBackend())
2808 {
2809 MyProc->fpRelId[unused_slot] = relid;
2810 FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2811 ++FastPathLocalUseCounts[group];
2812 return true;
2813 }
2814
2815 /* No existing entry, and no empty slot. */
2816 return false;
2817}
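/*
 * Illustrative sketch (editor's note, not part of the original source): the
 * per-backend fast-path array is addressed in two steps, a relation-keyed
 * group and a slot within that group, using the macros seen above:
 *
 *		uint32	group = FAST_PATH_REL_GROUP(relid);	// which group of slots
 *		uint32	f = FAST_PATH_SLOT(group, i);		// i-th slot of that group
 *
 *		if (MyProc->fpRelId[f] == relid && FAST_PATH_GET_BITS(MyProc, f) != 0)
 *			... relid already has an entry; just set the new mode bit ...
 *
 * Only weak relation locks (AccessShare/RowShare/RowExclusive) are eligible,
 * which is what EligibleForRelationFastPath enforces elsewhere in this file.
 */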
2818
2819/*
2820 * FastPathUnGrantRelationLock
2821 * Release fast-path lock, if present. Update backend-private local
2822 * use count, while we're at it.
2823 */
2824static bool
2825FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2826{
2827 uint32 i;
2828 bool result = false;
2829
2830 /* fast-path group the lock belongs to */
2831 uint32 group = FAST_PATH_REL_GROUP(relid);
2832
2833 FastPathLocalUseCounts[group] = 0;
2834 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2835 {
2836 /* index into the whole per-backend array */
2837 uint32 f = FAST_PATH_SLOT(group, i);
2838
2839 if (MyProc->fpRelId[f] == relid
2840 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2841 {
2842 Assert(!result);
2843 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2844 result = true;
2845 /* we continue iterating so as to update FastPathLocalUseCounts[] */
2846 }
2847 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2848 ++FastPathLocalUseCounts[group];
2849 }
2850 return result;
2851}
2852
2853/*
2854 * FastPathTransferRelationLocks
2855 * Transfer locks matching the given lock tag from per-backend fast-path
2856 * arrays to the shared hash table.
2857 *
2858 * Returns true if successful, false if ran out of shared memory.
2859 */
2860static bool
2861FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2862 uint32 hashcode)
2863{
2864 LWLock *partitionLock = LockHashPartitionLock(hashcode);
2865 Oid relid = locktag->locktag_field2;
2866 uint32 i;
2867
2868 /* fast-path group the lock belongs to */
2869 uint32 group = FAST_PATH_REL_GROUP(relid);
2870
2871 /*
2872 * Every PGPROC that can potentially hold a fast-path lock is present in
2873 * ProcGlobal->allProcs. Prepared transactions are not, but any
2874 * outstanding fast-path locks held by prepared transactions are
2875 * transferred to the main lock table.
2876 */
2877 for (i = 0; i < ProcGlobal->allProcCount; i++)
2878 {
2879 PGPROC *proc = &ProcGlobal->allProcs[i];
2880 uint32 j;
2881
2882
2883 LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2884 /*
2885 * If the target backend isn't referencing the same database as the
2886 * lock, then we needn't examine the individual relation IDs at all;
2887 * none of them can be relevant.
2888 *
2889 * proc->databaseId is set at backend startup time and never changes
2890 * thereafter, so it might be safe to perform this test before
2891 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2892 * assume that if the target backend holds any fast-path locks, it
2893 * must have performed a memory-fencing operation (in particular, an
2894 * LWLock acquisition) since setting proc->databaseId. However, it's
2895 * less clear that our backend is certain to have performed a memory
2896 * fencing operation since the other backend set proc->databaseId. So
2897 * for now, we test it after acquiring the LWLock just to be safe.
2898 *
2899 * Also skip groups without any registered fast-path locks.
2900 */
2901 if (proc->databaseId != locktag->locktag_field1 ||
2902 proc->fpLockBits[group] == 0)
2903 {
2904 LWLockRelease(&proc->fpInfoLock);
2905 continue;
2906 }
2907
2908 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2909 {
2910 uint32 lockmode;
2911
2912 /* index into the whole per-backend array */
2913 uint32 f = FAST_PATH_SLOT(group, j);
2914
2915 /* Look for an allocated slot matching the given relid. */
2916 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2917 continue;
2918
2919 /* Find or create lock object. */
2920 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2921 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2922 lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2923 ++lockmode)
2924 {
2925 PROCLOCK *proclock;
2926
2927 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2928 continue;
2929 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2930 hashcode, lockmode);
2931 if (!proclock)
2932 {
2933 LWLockRelease(partitionLock);
2934 LWLockRelease(&proc->fpInfoLock);
2935 return false;
2936 }
2937 GrantLock(proclock->tag.myLock, proclock, lockmode);
2938 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2939 }
2940 LWLockRelease(partitionLock);
2941
2942 /* No need to examine remaining slots. */
2943 break;
2944 }
2945 LWLockRelease(&proc->fpInfoLock);
2946 }
2947 return true;
2948}
2949
2950/*
2951 * FastPathGetRelationLockEntry
2952 * Return the PROCLOCK for a lock originally taken via the fast-path,
2953 * transferring it to the primary lock table if necessary.
2954 *
2955 * Note: caller takes care of updating the locallock object.
2956 */
2957static PROCLOCK *
2958FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2959{
2960 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2961 LOCKTAG *locktag = &locallock->tag.lock;
2962 PROCLOCK *proclock = NULL;
2963 LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2964 Oid relid = locktag->locktag_field2;
2965 uint32 i,
2966 group;
2967
2968 /* fast-path group the lock belongs to */
2969 group = FAST_PATH_REL_GROUP(relid);
2970
2971 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2972
2973 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2974 {
2975 uint32 lockmode;
2976
2977 /* index into the whole per-backend array */
2978 uint32 f = FAST_PATH_SLOT(group, i);
2979
2980 /* Look for an allocated slot matching the given relid. */
2981 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2982 continue;
2983
2984 /* If we don't have a lock of the given mode, forget it! */
2985 lockmode = locallock->tag.mode;
2986 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2987 break;
2988
2989 /* Find or create lock object. */
2990 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2991
2992 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2993 locallock->hashcode, lockmode);
2994 if (!proclock)
2995 {
2996 LWLockRelease(partitionLock);
2997 LWLockRelease(&MyProc->fpInfoLock);
2998 ereport(ERROR,
2999 (errcode(ERRCODE_OUT_OF_MEMORY),
3000 errmsg("out of shared memory"),
3001 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3002 }
3003 GrantLock(proclock->tag.myLock, proclock, lockmode);
3004 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3005
3006 LWLockRelease(partitionLock);
3007
3008 /* No need to examine remaining slots. */
3009 break;
3010 }
3011
3012 LWLockRelease(&MyProc->fpInfoLock);
3013
3014 /* Lock may have already been transferred by some other backend. */
3015 if (proclock == NULL)
3016 {
3017 LOCK *lock;
3018 PROCLOCKTAG proclocktag;
3019 uint32 proclock_hashcode;
3020
3021 LWLockAcquire(partitionLock, LW_SHARED);
3022
3023 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3024 locktag,
3025 locallock->hashcode,
3026 HASH_FIND,
3027 NULL);
3028 if (!lock)
3029 elog(ERROR, "failed to re-find shared lock object");
3030
3031 proclocktag.myLock = lock;
3032 proclocktag.myProc = MyProc;
3033
3034 proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
3035 proclock = (PROCLOCK *)
3036 hash_search_with_hash_value(LockMethodProcLockHash,
3037 &proclocktag,
3038 proclock_hashcode,
3039 HASH_FIND,
3040 NULL);
3041 if (!proclock)
3042 elog(ERROR, "failed to re-find shared proclock object");
3043 LWLockRelease(partitionLock);
3044 }
3045
3046 return proclock;
3047}
3048
3049/*
3050 * GetLockConflicts
3051 * Get an array of VirtualTransactionIds of xacts currently holding locks
3052 * that would conflict with the specified lock/lockmode.
3053 * xacts merely awaiting such a lock are NOT reported.
3054 *
3055 * The result array is palloc'd and is terminated with an invalid VXID.
3056 * *countp, if not null, is updated to the number of items set.
3057 *
3058 * Of course, the result could be out of date by the time it's returned, so
3059 * use of this function has to be thought about carefully. Similarly, a
3060 * PGPROC with no "lxid" will be considered non-conflicting regardless of any
3061 * lock it holds. Existing callers don't care about a locker after that
3062 * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
3063 * pg_xact updates and before releasing locks.
3064 *
3065 * Note we never include the current xact's vxid in the result array,
3066 * since an xact never blocks itself.
3067 */
3068VirtualTransactionId *
3069GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3070{
3071 static VirtualTransactionId *vxids;
3072 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
3073 LockMethod lockMethodTable;
3074 LOCK *lock;
3075 LOCKMASK conflictMask;
3076 dlist_iter proclock_iter;
3077 PROCLOCK *proclock;
3078 uint32 hashcode;
3079 LWLock *partitionLock;
3080 int count = 0;
3081 int fast_count = 0;
3082
3083 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
3084 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3085 lockMethodTable = LockMethods[lockmethodid];
3086 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
3087 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3088
3089 /*
3090 * Allocate memory to store results, and fill with InvalidVXID. We only
3091 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3092 * InHotStandby allocate once in TopMemoryContext.
3093 */
3094 if (InHotStandby)
3095 {
3096 if (vxids == NULL)
3097 vxids = (VirtualTransactionId *)
3098 MemoryContextAlloc(TopMemoryContext,
3099 sizeof(VirtualTransactionId) *
3100 (MaxBackends + max_prepared_xacts + 1));
3101 }
3102 else
3103 vxids = (VirtualTransactionId *)
3104 palloc0(sizeof(VirtualTransactionId) *
3105 (MaxBackends + max_prepared_xacts + 1));
3106
3107 /* Compute hash code and partition lock, and look up conflicting modes. */
3108 hashcode = LockTagHashCode(locktag);
3109 partitionLock = LockHashPartitionLock(hashcode);
3110 conflictMask = lockMethodTable->conflictTab[lockmode];
3111
3112 /*
3113 * Fast path locks might not have been entered in the primary lock table.
3114 * If the lock we're dealing with could conflict with such a lock, we must
3115 * examine each backend's fast-path array for conflicts.
3116 */
3117 if (ConflictsWithRelationFastPath(locktag, lockmode))
3118 {
3119 int i;
3120 Oid relid = locktag->locktag_field2;
3121 VirtualTransactionId vxid;
3122
3123 /* fast-path group the lock belongs to */
3124 uint32 group = FAST_PATH_REL_GROUP(relid);
3125
3126 /*
3127 * Iterate over relevant PGPROCs. Anything held by a prepared
3128 * transaction will have been transferred to the primary lock table,
3129 * so we need not worry about those. This is all a bit fuzzy, because
3130 * new locks could be taken after we've visited a particular
3131 * partition, but the callers had better be prepared to deal with that
3132 * anyway, since the locks could equally well be taken between the
3133 * time we return the value and the time the caller does something
3134 * with it.
3135 */
3136 for (i = 0; i < ProcGlobal->allProcCount; i++)
3137 {
3138 PGPROC *proc = &ProcGlobal->allProcs[i];
3139 uint32 j;
3140
3141 /* A backend never blocks itself */
3142 if (proc == MyProc)
3143 continue;
3144
3145 LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3146
3147 /*
3148 * If the target backend isn't referencing the same database as
3149 * the lock, then we needn't examine the individual relation IDs
3150 * at all; none of them can be relevant.
3151 *
3152 * See FastPathTransferRelationLocks() for discussion of why we do
3153 * this test after acquiring the lock.
3154 *
3155 * Also skip groups without any registered fast-path locks.
3156 */
3157 if (proc->databaseId != locktag->locktag_field1 ||
3158 proc->fpLockBits[group] == 0)
3159 {
3160 LWLockRelease(&proc->fpInfoLock);
3161 continue;
3162 }
3163
3164 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3165 {
3166 uint32 lockmask;
3167
3168 /* index into the whole per-backend array */
3169 uint32 f = FAST_PATH_SLOT(group, j);
3170
3171 /* Look for an allocated slot matching the given relid. */
3172 if (relid != proc->fpRelId[f])
3173 continue;
3174 lockmask = FAST_PATH_GET_BITS(proc, f);
3175 if (!lockmask)
3176 continue;
3177 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3178
3179 /*
3180 * There can only be one entry per relation, so if we found it
3181 * and it doesn't conflict, we can skip the rest of the slots.
3182 */
3183 if ((lockmask & conflictMask) == 0)
3184 break;
3185
3186 /* Conflict! */
3187 GET_VXID_FROM_PGPROC(vxid, *proc);
3188
3189 if (VirtualTransactionIdIsValid(vxid))
3190 vxids[count++] = vxid;
3191 /* else, xact already committed or aborted */
3192
3193 /* No need to examine remaining slots. */
3194 break;
3195 }
3196
3197 LWLockRelease(&proc->fpInfoLock);
3198 }
3199 }
3200
3201 /* Remember how many fast-path conflicts we found. */
3202 fast_count = count;
3203
3204 /*
3205 * Look up the lock object matching the tag.
3206 */
3207 LWLockAcquire(partitionLock, LW_SHARED);
3208
3209 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3210 locktag,
3211 hashcode,
3212 HASH_FIND,
3213 NULL);
3214 if (!lock)
3215 {
3216 /*
3217 * If the lock object doesn't exist, there is nothing holding a lock
3218 * on this lockable object.
3219 */
3220 LWLockRelease(partitionLock);
3221 vxids[count].procNumber = INVALID_PROC_NUMBER;
3222 vxids[count].localTransactionId = InvalidLocalTransactionId;
3223 if (countp)
3224 *countp = count;
3225 return vxids;
3226 }
3227
3228 /*
3229 * Examine each existing holder (or awaiter) of the lock.
3230 */
3231 dlist_foreach(proclock_iter, &lock->procLocks)
3232 {
3233 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3234
3235 if (conflictMask & proclock->holdMask)
3236 {
3237 PGPROC *proc = proclock->tag.myProc;
3238
3239 /* A backend never blocks itself */
3240 if (proc != MyProc)
3241 {
3242 VirtualTransactionId vxid;
3243
3244 GET_VXID_FROM_PGPROC(vxid, *proc);
3245
3246 if (VirtualTransactionIdIsValid(vxid))
3247 {
3248 int i;
3249
3250 /* Avoid duplicate entries. */
3251 for (i = 0; i < fast_count; ++i)
3252 if (VirtualTransactionIdEquals(vxids[i], vxid))
3253 break;
3254 if (i >= fast_count)
3255 vxids[count++] = vxid;
3256 }
3257 /* else, xact already committed or aborted */
3258 }
3259 }
3260 }
3261
3262 LWLockRelease(partitionLock);
3263
3264 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3265 elog(PANIC, "too many conflicting locks found");
3266
3267 vxids[count].procNumber = INVALID_PROC_NUMBER;
3268 vxids[count].localTransactionId = InvalidLocalTransactionId;
3269 if (countp)
3270 *countp = count;
3271 return vxids;
3272}
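/*
 * Illustrative sketch (editor's note, not part of the original source): a
 * typical caller (roughly what WaitForLockers in lmgr.c does) uses the
 * result to wait out every conflicting transaction:
 *
 *		VirtualTransactionId *old_lockers;
 *
 *		old_lockers = GetLockConflicts(&heaplocktag, AccessExclusiveLock, NULL);
 *		for (; VirtualTransactionIdIsValid(*old_lockers); old_lockers++)
 *			VirtualXactLock(*old_lockers, true);	// sleep until each is gone
 *
 * Hot-standby conflict resolution uses the same array to cancel conflicting
 * backends instead of waiting for them.
 */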
3273
3274/*
3275 * Find a lock in the shared lock table and release it. It is the caller's
3276 * responsibility to verify that this is a sane thing to do. (For example, it
3277 * would be bad to release a lock here if there might still be a LOCALLOCK
3278 * object with pointers to it.)
3279 *
3280 * We currently use this in two situations: first, to release locks held by
3281 * prepared transactions on commit (see lock_twophase_postcommit); and second,
3282 * to release locks taken via the fast-path, transferred to the main hash
3283 * table, and then released (see LockReleaseAll).
3284 */
3285static void
3286LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3287 LOCKTAG *locktag, LOCKMODE lockmode,
3288 bool decrement_strong_lock_count)
3289{
3290 LOCK *lock;
3291 PROCLOCK *proclock;
3292 PROCLOCKTAG proclocktag;
3293 uint32 hashcode;
3294 uint32 proclock_hashcode;
3295 LWLock *partitionLock;
3296 bool wakeupNeeded;
3297
3298 hashcode = LockTagHashCode(locktag);
3299 partitionLock = LockHashPartitionLock(hashcode);
3300
3301 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3302
3303 /*
3304 * Re-find the lock object (it had better be there).
3305 */
3306 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3307 locktag,
3308 hashcode,
3309 HASH_FIND,
3310 NULL);
3311 if (!lock)
3312 elog(PANIC, "failed to re-find shared lock object");
3313
3314 /*
3315 * Re-find the proclock object (ditto).
3316 */
3317 proclocktag.myLock = lock;
3318 proclocktag.myProc = proc;
3319
3320 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3321
3322 proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3323 &proclocktag,
3324 proclock_hashcode,
3325 HASH_FIND,
3326 NULL);
3327 if (!proclock)
3328 elog(PANIC, "failed to re-find shared proclock object");
3329
3330 /*
3331 * Double-check that we are actually holding a lock of the type we want to
3332 * release.
3333 */
3334 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3335 {
3336 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3337 LWLockRelease(partitionLock);
3338 elog(WARNING, "you don't own a lock of type %s",
3339 lockMethodTable->lockModeNames[lockmode]);
3340 return;
3341 }
3342
3343 /*
3344 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3345 */
3346 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3347
3348 CleanUpLock(lock, proclock,
3349 lockMethodTable, hashcode,
3350 wakeupNeeded);
3351
3352 LWLockRelease(partitionLock);
3353
3354 /*
3355 * Decrement strong lock count. This logic is needed only for 2PC.
3356 */
3357 if (decrement_strong_lock_count
3358 && ConflictsWithRelationFastPath(locktag, lockmode))
3359 {
3360 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3361
3362 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3363 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3364 FastPathStrongRelationLocks->count[fasthashcode]--;
3365 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3366 }
3367}
3368
3369/*
3370 * CheckForSessionAndXactLocks
3371 * Check to see if transaction holds both session-level and xact-level
3372 * locks on the same object; if so, throw an error.
3373 *
3374 * If we have both session- and transaction-level locks on the same object,
3375 * PREPARE TRANSACTION must fail. This should never happen with regular
3376 * locks, since we only take those at session level in some special operations
3377 * like VACUUM. It's possible to hit this with advisory locks, though.
3378 *
3379 * It would be nice if we could keep the session hold and give away the
3380 * transactional hold to the prepared xact. However, that would require two
3381 * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3382 * available when it comes time for PostPrepare_Locks to do the deed.
3383 * So for now, we error out while we can still do so safely.
3384 *
3385 * Since the LOCALLOCK table stores a separate entry for each lockmode,
3386 * we can't implement this check by examining LOCALLOCK entries in isolation.
3387 * We must build a transient hashtable that is indexed by locktag only.
3388 */
3389static void
3390CheckForSessionAndXactLocks(void)
3391{
3392 typedef struct
3393 {
3394 LOCKTAG lock; /* identifies the lockable object */
3395 bool sessLock; /* is any lockmode held at session level? */
3396 bool xactLock; /* is any lockmode held at xact level? */
3397 } PerLockTagEntry;
3398
3399 HASHCTL hash_ctl;
3400 HTAB *lockhtab;
3401 HASH_SEQ_STATUS status;
3402 LOCALLOCK *locallock;
3403
3404 /* Create a local hash table keyed by LOCKTAG only */
3405 hash_ctl.keysize = sizeof(LOCKTAG);
3406 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3407 hash_ctl.hcxt = CurrentMemoryContext;
3408
3409 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3410 256, /* arbitrary initial size */
3411 &hash_ctl,
3412 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3413
3414 /* Scan local lock table to find entries for each LOCKTAG */
3415 hash_seq_init(&status, LockMethodLocalHash);
3416
3417 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3418 {
3419 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3420 PerLockTagEntry *hentry;
3421 bool found;
3422 int i;
3423
3424 /*
3425 * Ignore VXID locks. We don't want those to be held by prepared
3426 * transactions, since they aren't meaningful after a restart.
3427 */
3428 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3429 continue;
3430
3431 /* Ignore it if we don't actually hold the lock */
3432 if (locallock->nLocks <= 0)
3433 continue;
3434
3435 /* Otherwise, find or make an entry in lockhtab */
3436 hentry = (PerLockTagEntry *) hash_search(lockhtab,
3437 &locallock->tag.lock,
3438 HASH_ENTER, &found);
3439 if (!found) /* initialize, if newly created */
3440 hentry->sessLock = hentry->xactLock = false;
3441
3442 /* Scan to see if we hold lock at session or xact level or both */
3443 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3444 {
3445 if (lockOwners[i].owner == NULL)
3446 hentry->sessLock = true;
3447 else
3448 hentry->xactLock = true;
3449 }
3450
3451 /*
3452 * We can throw error immediately when we see both types of locks; no
3453 * need to wait around to see if there are more violations.
3454 */
3455 if (hentry->sessLock && hentry->xactLock)
3456 ereport(ERROR,
3457 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3458 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3459 }
3460
3461 /* Success, so clean up */
3462 hash_destroy(lockhtab);
3463}
3464
3465/*
3466 * AtPrepare_Locks
3467 * Do the preparatory work for a PREPARE: make 2PC state file records
3468 * for all locks currently held.
3469 *
3470 * Session-level locks are ignored, as are VXID locks.
3471 *
3472 * For the most part, we don't need to touch shared memory for this ---
3473 * all the necessary state information is in the locallock table.
3474 * Fast-path locks are an exception, however: we move any such locks to
3475 * the main table before allowing PREPARE TRANSACTION to succeed.
3476 */
3477void
3478AtPrepare_Locks(void)
3479{
3480 HASH_SEQ_STATUS status;
3481 LOCALLOCK *locallock;
3482
3483 /* First, verify there aren't locks of both xact and session level */
3484 CheckForSessionAndXactLocks();
3485
3486 /* Now do the per-locallock cleanup work */
3487 hash_seq_init(&status, LockMethodLocalHash);
3488
3489 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3490 {
3491 TwoPhaseLockRecord record;
3492 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3493 bool haveSessionLock;
3494 bool haveXactLock;
3495 int i;
3496
3497 /*
3498 * Ignore VXID locks. We don't want those to be held by prepared
3499 * transactions, since they aren't meaningful after a restart.
3500 */
3501 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3502 continue;
3503
3504 /* Ignore it if we don't actually hold the lock */
3505 if (locallock->nLocks <= 0)
3506 continue;
3507
3508 /* Scan to see whether we hold it at session or transaction level */
3509 haveSessionLock = haveXactLock = false;
3510 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3511 {
3512 if (lockOwners[i].owner == NULL)
3513 haveSessionLock = true;
3514 else
3515 haveXactLock = true;
3516 }
3517
3518 /* Ignore it if we have only session lock */
3519 if (!haveXactLock)
3520 continue;
3521
3522 /* This can't happen, because we already checked it */
3523 if (haveSessionLock)
3524 ereport(ERROR,
3525 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3526 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3527
3528 /*
3529 * If the local lock was taken via the fast-path, we need to move it
3530 * to the primary lock table, or just get a pointer to the existing
3531 * primary lock table entry if by chance it's already been
3532 * transferred.
3533 */
3534 if (locallock->proclock == NULL)
3535 {
3536 locallock->proclock = FastPathGetRelationLockEntry(locallock);
3537 locallock->lock = locallock->proclock->tag.myLock;
3538 }
3539
3540 /*
3541 * Arrange to not release any strong lock count held by this lock
3542 * entry. We must retain the count until the prepared transaction is
3543 * committed or rolled back.
3544 */
3545 locallock->holdsStrongLockCount = false;
3546
3547 /*
3548 * Create a 2PC record.
3549 */
3550 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3551 record.lockmode = locallock->tag.mode;
3552
3553 RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3554 &record, sizeof(TwoPhaseLockRecord));
3555 }
3556}
3557
3558/*
3559 * PostPrepare_Locks
3560 * Clean up after successful PREPARE
3561 *
3562 * Here, we want to transfer ownership of our locks to a dummy PGPROC
3563 * that's now associated with the prepared transaction, and we want to
3564 * clean out the corresponding entries in the LOCALLOCK table.
3565 *
3566 * Note: by removing the LOCALLOCK entries, we are leaving dangling
3567 * pointers in the transaction's resource owner. This is OK at the
3568 * moment since resowner.c doesn't try to free locks retail at a toplevel
3569 * transaction commit or abort. We could alternatively zero out nLocks
3570 * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3571 * but that probably costs more cycles.
3572 */
3573void
3574PostPrepare_Locks(FullTransactionId fxid)
3575{
3576 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3577 HASH_SEQ_STATUS status;
3578 LOCALLOCK *locallock;
3579 LOCK *lock;
3580 PROCLOCK *proclock;
3581 PROCLOCKTAG proclocktag;
3582 int partition;
3583
3584 /* Can't prepare a lock group follower. */
3585 Assert(MyProc->lockGroupLeader == NULL ||
3586 MyProc->lockGroupLeader == MyProc);
3587
3588 /* This is a critical section: any error means big trouble */
3589 START_CRIT_SECTION();
3590
3591 /*
3592 * First we run through the locallock table and get rid of unwanted
3593 * entries, then we scan the process's proclocks and transfer them to the
3594 * target proc.
3595 *
3596 * We do this separately because we may have multiple locallock entries
3597 * pointing to the same proclock, and we daren't end up with any dangling
3598 * pointers.
3599 */
3600 hash_seq_init(&status, LockMethodLocalHash);
3601
3602 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3603 {
3604 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3605 bool haveSessionLock;
3606 bool haveXactLock;
3607 int i;
3608
3609 if (locallock->proclock == NULL || locallock->lock == NULL)
3610 {
3611 /*
3612 * We must've run out of shared memory while trying to set up this
3613 * lock. Just forget the local entry.
3614 */
3615 Assert(locallock->nLocks == 0);
3616 RemoveLocalLock(locallock);
3617 continue;
3618 }
3619
3620 /* Ignore VXID locks */
3621 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3622 continue;
3623
3624 /* Scan to see whether we hold it at session or transaction level */
3625 haveSessionLock = haveXactLock = false;
3626 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3627 {
3628 if (lockOwners[i].owner == NULL)
3629 haveSessionLock = true;
3630 else
3631 haveXactLock = true;
3632 }
3633
3634 /* Ignore it if we have only session lock */
3635 if (!haveXactLock)
3636 continue;
3637
3638 /* This can't happen, because we already checked it */
3639 if (haveSessionLock)
3640 ereport(PANIC,
3641 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3642 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3643
3644 /* Mark the proclock to show we need to release this lockmode */
3645 if (locallock->nLocks > 0)
3646 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3647
3648 /* And remove the locallock hashtable entry */
3649 RemoveLocalLock(locallock);
3650 }
3651
3652 /*
3653 * Now, scan each lock partition separately.
3654 */
3655 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3656 {
3657 LWLock *partitionLock;
3658 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3659 dlist_mutable_iter proclock_iter;
3660
3661 partitionLock = LockHashPartitionLockByIndex(partition);
3662
3663 /*
3664 * If the proclock list for this partition is empty, we can skip
3665 * acquiring the partition lock. This optimization is safer than the
3666 * situation in LockReleaseAll, because we got rid of any fast-path
3667 * locks during AtPrepare_Locks, so there cannot be any case where
3668 * another backend is adding something to our lists now. For safety,
3669 * though, we code this the same way as in LockReleaseAll.
3670 */
3671 if (dlist_is_empty(procLocks))
3672 continue; /* needn't examine this partition */
3673
3674 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3675
3676 dlist_foreach_modify(proclock_iter, procLocks)
3677 {
3678 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3679
3680 Assert(proclock->tag.myProc == MyProc);
3681
3682 lock = proclock->tag.myLock;
3683
3684 /* Ignore VXID locks */
3685 if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3686 continue;
3687
3688 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3689 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3690 Assert(lock->nRequested >= 0);
3691 Assert(lock->nGranted >= 0);
3692 Assert(lock->nGranted <= lock->nRequested);
3693 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3694
3695 /* Ignore it if nothing to release (must be a session lock) */
3696 if (proclock->releaseMask == 0)
3697 continue;
3698
3699 /* Else we should be releasing all locks */
3700 if (proclock->releaseMask != proclock->holdMask)
3701 elog(PANIC, "we seem to have dropped a bit somewhere");
3702
3703 /*
3704 * We cannot simply modify proclock->tag.myProc to reassign
3705 * ownership of the lock, because that's part of the hash key and
3706 * the proclock would then be in the wrong hash chain. Instead
3707 * use hash_update_hash_key. (We used to create a new hash entry,
3708 * but that risks out-of-memory failure if other processes are
3709 * busy making proclocks too.) We must unlink the proclock from
3710 * our procLink chain and put it into the new proc's chain, too.
3711 *
3712 * Note: the updated proclock hash key will still belong to the
3713 * same hash partition, cf proclock_hash(). So the partition lock
3714 * we already hold is sufficient for this.
3715 */
3716 dlist_delete(&proclock->procLink);
3717
3718 /*
3719 * Create the new hash key for the proclock.
3720 */
3721 proclocktag.myLock = lock;
3722 proclocktag.myProc = newproc;
3723
3724 /*
3725 * Update groupLeader pointer to point to the new proc. (We'd
3726 * better not be a member of somebody else's lock group!)
3727 */
3728 Assert(proclock->groupLeader == proclock->tag.myProc);
3729 proclock->groupLeader = newproc;
3730
3731 /*
3732 * Update the proclock. We should not find any existing entry for
3733 * the same hash key, since there can be only one entry for any
3734 * given lock with my own proc.
3735 */
3736 if (!hash_update_hash_key(LockMethodProcLockHash,
3737 proclock,
3738 &proclocktag))
3739 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3740
3741 /* Re-link into the new proc's proclock list */
3742 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3743
3744 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3745 } /* loop over PROCLOCKs within this partition */
3746
3747 LWLockRelease(partitionLock);
3748 } /* loop over partitions */
3749
3750 END_CRIT_SECTION();
3751}
3752
3753
3754/*
3755 * Estimate shared-memory space used for lock tables
3756 */
3757Size
3758LockManagerShmemSize(void)
3759{
3760 Size size = 0;
3761 long max_table_size;
3762
3763 /* lock hash table */
3764 max_table_size = NLOCKENTS();
3765 size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3766
3767 /* proclock hash table */
3768 max_table_size *= 2;
3769 size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3770
3771 /*
3772 * Since NLOCKENTS is only an estimate, add 10% safety margin.
3773 */
3774 size = add_size(size, size / 10);
3775
3776 return size;
3777}
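/*
 * Worked example (editorial note, not part of lock.c): with the default
 * max_locks_per_xact = 64 and max_prepared_xacts = 0, and assuming
 * MaxBackends comes out to 100, NLOCKENTS() is 64 * (100 + 0) = 6400.  The
 * estimate then covers 6400 LOCK entries, 2 * 6400 = 12800 PROCLOCK entries,
 * plus the 10% safety margin on the total.
 */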
3778
3779/*
3780 * GetLockStatusData - Return a summary of the lock manager's internal
3781 * status, for use in a user-level reporting function.
3782 *
3783 * The return data consists of an array of LockInstanceData objects,
3784 * which are a lightly abstracted version of the PROCLOCK data structures,
3785 * i.e. there is one entry for each unique lock and interested PGPROC.
3786 * It is the caller's responsibility to match up related items (such as
3787 * references to the same lockable object or PGPROC) if wanted.
3788 *
3789 * The design goal is to hold the LWLocks for as short a time as possible;
3790 * thus, this function simply makes a copy of the necessary data and releases
3791 * the locks, allowing the caller to contemplate and format the data for as
3792 * long as it pleases.
3793 */
3794LockData *
3795GetLockStatusData(void)
3796{
3797 LockData *data;
3798 PROCLOCK *proclock;
3799 HASH_SEQ_STATUS seqstat;
3800 int els;
3801 int el;
3802 int i;
3803
3804 data = (LockData *) palloc(sizeof(LockData));
3805
3806 /* Guess how much space we'll need. */
3807 els = MaxBackends;
3808 el = 0;
3809 data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3810
3811 /*
3812 * First, we iterate through the per-backend fast-path arrays, locking
3813 * them one at a time. This might produce an inconsistent picture of the
3814 * system state, but taking all of those LWLocks at the same time seems
3815 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3816 * matter too much, because none of these locks can be involved in lock
3817 * conflicts anyway - anything that might conflict must be present in the main lock
3818 * table. (For the same reason, we don't sweat about making leaderPid
3819 * completely valid. We cannot safely dereference another backend's
3820 * lockGroupLeader field without holding all lock partition locks, and
3821 * it's not worth that.)
3822 */
3823 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3824 {
3825 PGPROC *proc = &ProcGlobal->allProcs[i];
3826
3827 /* Skip backends with pid=0, as they don't hold fast-path locks */
3828 if (proc->pid == 0)
3829 continue;
3830
3831 LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3832
3833 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3834 {
3835 /* Skip groups without registered fast-path locks */
3836 if (proc->fpLockBits[g] == 0)
3837 continue;
3838
3839 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3840 {
3841 LockInstanceData *instance;
3842 uint32 f = FAST_PATH_SLOT(g, j);
3843 uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3844
3845 /* Skip unallocated slots */
3846 if (!lockbits)
3847 continue;
3848
3849 if (el >= els)
3850 {
3851 els += MaxBackends;
3852 data->locks = (LockInstanceData *)
3853 repalloc(data->locks, sizeof(LockInstanceData) * els);
3854 }
3855
3856 instance = &data->locks[el];
3857 SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3858 proc->fpRelId[f]);
3859 instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3860 instance->waitLockMode = NoLock;
3861 instance->vxid.procNumber = proc->vxid.procNumber;
3862 instance->vxid.localTransactionId = proc->vxid.lxid;
3863 instance->pid = proc->pid;
3864 instance->leaderPid = proc->pid;
3865 instance->fastpath = true;
3866
3867 /*
3868 * Successfully taking fast path lock means there were no
3869 * conflicting locks.
3870 */
3871 instance->waitStart = 0;
3872
3873 el++;
3874 }
3875 }
3876
3877 if (proc->fpVXIDLock)
3878 {
3879 VirtualTransactionId vxid;
3880 LockInstanceData *instance;
3881
3882 if (el >= els)
3883 {
3884 els += MaxBackends;
3885 data->locks = (LockInstanceData *)
3886 repalloc(data->locks, sizeof(LockInstanceData) * els);
3887 }
3888
3889 vxid.procNumber = proc->vxid.procNumber;
3890 vxid.localTransactionId = proc->fpLocalTransactionId;
3891
3892 instance = &data->locks[el];
3893 SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3894 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3895 instance->waitLockMode = NoLock;
3896 instance->vxid.procNumber = proc->vxid.procNumber;
3897 instance->vxid.localTransactionId = proc->vxid.lxid;
3898 instance->pid = proc->pid;
3899 instance->leaderPid = proc->pid;
3900 instance->fastpath = true;
3901 instance->waitStart = 0;
3902
3903 el++;
3904 }
3905
3906 LWLockRelease(&proc->fpInfoLock);
3907 }
3908
3909 /*
3910 * Next, acquire lock on the entire shared lock data structure. We do
3911 * this so that, at least for locks in the primary lock table, the state
3912 * will be self-consistent.
3913 *
3914 * Since this is a read-only operation, we take shared instead of
3915 * exclusive lock. There's not a whole lot of point to this, because all
3916 * the normal operations require exclusive lock, but it doesn't hurt
3917 * anything either. It will at least allow two backends to do
3918 * GetLockStatusData in parallel.
3919 *
3920 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3921 */
3922 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3923 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3924
3925 /* Now we can safely count the number of proclocks */
3926 data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3927 if (data->nelements > els)
3928 {
3929 els = data->nelements;
3930 data->locks = (LockInstanceData *)
3931 repalloc(data->locks, sizeof(LockInstanceData) * els);
3932 }
3933
3934 /* Now scan the tables to copy the data */
3935 hash_seq_init(&seqstat, LockMethodProcLockHash);
3936
3937 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3938 {
3939 PGPROC *proc = proclock->tag.myProc;
3940 LOCK *lock = proclock->tag.myLock;
3941 LockInstanceData *instance = &data->locks[el];
3942
3943 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3944 instance->holdMask = proclock->holdMask;
3945 if (proc->waitLock == proclock->tag.myLock)
3946 instance->waitLockMode = proc->waitLockMode;
3947 else
3948 instance->waitLockMode = NoLock;
3949 instance->vxid.procNumber = proc->vxid.procNumber;
3950 instance->vxid.localTransactionId = proc->vxid.lxid;
3951 instance->pid = proc->pid;
3952 instance->leaderPid = proclock->groupLeader->pid;
3953 instance->fastpath = false;
3954 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3955
3956 el++;
3957 }
3958
3959 /*
3960 * And release locks. We do this in reverse order for two reasons: (1)
3961 * Anyone else who needs more than one of the locks will be trying to lock
3962 * them in increasing order; we don't want to release the other process
3963 * until it can get all the locks it needs. (2) This avoids O(N^2)
3964 * behavior inside LWLockRelease.
3965 */
3966 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3967 LWLockRelease(LockHashPartitionLockByIndex(i));
3968
3969 Assert(el == data->nelements);
3970
3971 return data;
3972}
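/*
 * Usage sketch (editorial note, not part of lock.c): a reporting function
 * such as pg_lock_status() walks the result roughly like this; the variable
 * names below are hypothetical.
 *
 *	LockData   *lockData = GetLockStatusData();
 *
 *	for (int n = 0; n < lockData->nelements; n++)
 *	{
 *		LockInstanceData *inst = &lockData->locks[n];
 *
 *		(inst->locktag identifies the locked object, inst->holdMask the
 *		 modes held, inst->waitLockMode any awaited mode, inst->pid the
 *		 backend, and inst->fastpath whether it came from a fast-path slot.)
 *	}
 */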
3973
3974/*
3975 * GetBlockerStatusData - Return a summary of the lock manager's state
3976 * concerning locks that are blocking the specified PID or any member of
3977 * the PID's lock group, for use in a user-level reporting function.
3978 *
3979 * For each PID within the lock group that is awaiting some heavyweight lock,
3980 * the return data includes an array of LockInstanceData objects, which are
3981 * the same data structure used by GetLockStatusData; but unlike that function,
3982 * this one reports only the PROCLOCKs associated with the lock that that PID
3983 * is blocked on. (Hence, all the locktags should be the same for any one
3984 * blocked PID.) In addition, we return an array of the PIDs of those backends
3985 * that are ahead of the blocked PID in the lock's wait queue. These can be
3986 * compared with the PIDs in the LockInstanceData objects to determine which
3987 * waiters are ahead of or behind the blocked PID in the queue.
3988 *
3989 * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3990 * waiting on any heavyweight lock, return empty arrays.
3991 *
3992 * The design goal is to hold the LWLocks for as short a time as possible;
3993 * thus, this function simply makes a copy of the necessary data and releases
3994 * the locks, allowing the caller to contemplate and format the data for as
3995 * long as it pleases.
3996 */
3997BlockedProcsData *
3998GetBlockerStatusData(int blocked_pid)
3999{
4000 BlockedProcsData *data;
4001 PGPROC *proc;
4002 int i;
4003
4004 data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
4005
4006 /*
4007 * Guess how much space we'll need, and preallocate. Most of the time
4008 * this will avoid needing to do repalloc while holding the LWLocks. (We
4009 * assume, but check with an Assert, that MaxBackends is enough entries
4010 * for the procs[] array; the other two could need enlargement, though.)
4011 */
4012 data->nprocs = data->nlocks = data->npids = 0;
4013 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
4014 data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
4015 data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
4016 data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
4017
4018 /*
4019 * In order to search the ProcArray for blocked_pid and assume that that
4020 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4021 * In addition, to examine the lock grouping fields of any other backend,
4022 * we must hold all the hash partition locks. (Only one of those locks is
4023 * actually relevant for any one lock group, but we can't know which one
4024 * ahead of time.) It's fairly annoying to hold all those locks
4025 * throughout this, but it's no worse than GetLockStatusData(), and it
4026 * does have the advantage that we're guaranteed to return a
4027 * self-consistent instantaneous state.
4028 */
4029 LWLockAcquire(ProcArrayLock, LW_SHARED);
4030
4031 proc = BackendPidGetProcWithLock(blocked_pid);
4032
4033 /* Nothing to do if it's gone */
4034 if (proc != NULL)
4035 {
4036 /*
4037 * Acquire lock on the entire shared lock data structure. See notes
4038 * in GetLockStatusData().
4039 */
4040 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4041 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4042
4043 if (proc->lockGroupLeader == NULL)
4044 {
4045 /* Easy case, proc is not a lock group member */
4046 GetSingleProcBlockerStatusData(proc, data);
4047 }
4048 else
4049 {
4050 /* Examine all procs in proc's lock group */
4051 dlist_iter iter;
4052
4053 dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
4054 {
4055 PGPROC *memberProc;
4056
4057 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4058 GetSingleProcBlockerStatusData(memberProc, data);
4059 }
4060 }
4061
4062 /*
4063 * And release locks. See notes in GetLockStatusData().
4064 */
4065 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4066 LWLockRelease(LockHashPartitionLockByIndex(i));
4067
4068 Assert(data->nprocs <= data->maxprocs);
4069 }
4070
4071 LWLockRelease(ProcArrayLock);
4072
4073 return data;
4074}
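/*
 * Usage sketch (editorial note, not part of lock.c): the returned arrays are
 * parallel, and a consumer such as pg_blocking_pids() slices them per blocked
 * process via the first_lock/num_locks and first_waiter/num_waiters fields;
 * "somePid" below is a hypothetical blocked backend PID.
 *
 *	BlockedProcsData *bdata = GetBlockerStatusData(somePid);
 *
 *	for (int p = 0; p < bdata->nprocs; p++)
 *	{
 *		BlockedProcData *bproc = &bdata->procs[p];
 *		LockInstanceData *locks = &bdata->locks[bproc->first_lock];
 *		int		   *pids = &bdata->waiter_pids[bproc->first_waiter];
 *
 *		(bproc->num_locks entries in locks[], bproc->num_waiters in pids[])
 *	}
 */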
4075
4076/* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4077static void
4078GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
4079{
4080 LOCK *theLock = blocked_proc->waitLock;
4081 BlockedProcData *bproc;
4082 dlist_iter proclock_iter;
4083 dlist_iter proc_iter;
4084 dclist_head *waitQueue;
4085 int queue_size;
4086
4087 /* Nothing to do if this proc is not blocked */
4088 if (theLock == NULL)
4089 return;
4090
4091 /* Set up a procs[] element */
4092 bproc = &data->procs[data->nprocs++];
4093 bproc->pid = blocked_proc->pid;
4094 bproc->first_lock = data->nlocks;
4095 bproc->first_waiter = data->npids;
4096
4097 /*
4098 * We may ignore the proc's fast-path arrays, since nothing in those could
4099 * be related to a contended lock.
4100 */
4101
4102 /* Collect all PROCLOCKs associated with theLock */
4103 dlist_foreach(proclock_iter, &theLock->procLocks)
4104 {
4105 PROCLOCK *proclock =
4106 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4107 PGPROC *proc = proclock->tag.myProc;
4108 LOCK *lock = proclock->tag.myLock;
4109 LockInstanceData *instance;
4110
4111 if (data->nlocks >= data->maxlocks)
4112 {
4113 data->maxlocks += MaxBackends;
4114 data->locks = (LockInstanceData *)
4115 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4116 }
4117
4118 instance = &data->locks[data->nlocks];
4119 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4120 instance->holdMask = proclock->holdMask;
4121 if (proc->waitLock == lock)
4122 instance->waitLockMode = proc->waitLockMode;
4123 else
4124 instance->waitLockMode = NoLock;
4125 instance->vxid.procNumber = proc->vxid.procNumber;
4126 instance->vxid.localTransactionId = proc->vxid.lxid;
4127 instance->pid = proc->pid;
4128 instance->leaderPid = proclock->groupLeader->pid;
4129 instance->fastpath = false;
4130 data->nlocks++;
4131 }
4132
4133 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4134 waitQueue = &(theLock->waitProcs);
4135 queue_size = dclist_count(waitQueue);
4136
4137 if (queue_size > data->maxpids - data->npids)
4138 {
4139 data->maxpids = Max(data->maxpids + MaxBackends,
4140 data->npids + queue_size);
4141 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4142 sizeof(int) * data->maxpids);
4143 }
4144
4145 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4146 dclist_foreach(proc_iter, waitQueue)
4147 {
4148 PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4149
4150 if (queued_proc == blocked_proc)
4151 break;
4152 data->waiter_pids[data->npids++] = queued_proc->pid;
4153 queued_proc = (PGPROC *) queued_proc->links.next;
4154 }
4155
4156 bproc->num_locks = data->nlocks - bproc->first_lock;
4157 bproc->num_waiters = data->npids - bproc->first_waiter;
4158}
4159
4160/*
4161 * Returns a list of currently held AccessExclusiveLocks, for use by
4162 * LogStandbySnapshot(). The result is a palloc'd array,
4163 * with the number of elements returned into *nlocks.
4164 *
4165 * XXX This currently takes a lock on all partitions of the lock table,
4166 * but it's possible to do better. By reference counting locks and storing
4167 * the value in the ProcArray entry for each backend we could tell if any
4168 * locks need recording without having to acquire the partition locks and
4169 * scan the lock table. Whether that's worth the additional overhead
4170 * is pretty dubious though.
4171 */
4172xl_standby_lock *
4173GetRunningTransactionLocks(int *nlocks)
4174{
4175 xl_standby_lock *accessExclusiveLocks;
4176 PROCLOCK *proclock;
4177 HASH_SEQ_STATUS seqstat;
4178 int i;
4179 int index;
4180 int els;
4181
4182 /*
4183 * Acquire lock on the entire shared lock data structure.
4184 *
4185 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4186 */
4187 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4188 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4189
4190 /* Now we can safely count the number of proclocks */
4191 els = hash_get_num_entries(LockMethodProcLockHash);
4192
4193 /*
4194 * Allocating enough space for all locks in the lock table is overkill,
4195 * but it's more convenient and faster than having to enlarge the array.
4196 */
4197 accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4198
4199 /* Now scan the tables to copy the data */
4200 hash_seq_init(&seqstat, LockMethodProcLockHash);
4201
4202 /*
4203 * If lock is a currently granted AccessExclusiveLock then it will have
4204 * just one proclock holder, so locks are never accessed twice in this
4205 * particular case. Don't copy this code for use elsewhere because in the
4206 * general case this will give you duplicate locks when looking at
4207 * non-exclusive lock types.
4208 */
4209 index = 0;
4210 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4211 {
4212 /* make sure this definition matches the one used in LockAcquire */
4213 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4214 proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4215 {
4216 PGPROC *proc = proclock->tag.myProc;
4217 LOCK *lock = proclock->tag.myLock;
4218 TransactionId xid = proc->xid;
4219
4220 /*
4221 * Don't record locks for transactions if we know they have
4222 * already issued their WAL record for commit but not yet released
4223 * lock. It is still possible that we see locks held by already
4224 * complete transactions, if they haven't yet zeroed their xids.
4225 */
4226 if (!TransactionIdIsValid(xid))
4227 continue;
4228
4229 accessExclusiveLocks[index].xid = xid;
4230 accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4231 accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4232
4233 index++;
4234 }
4235 }
4236
4237 Assert(index <= els);
4238
4239 /*
4240 * And release locks. We do this in reverse order for two reasons: (1)
4241 * Anyone else who needs more than one of the locks will be trying to lock
4242 * them in increasing order; we don't want to release the other process
4243 * until it can get all the locks it needs. (2) This avoids O(N^2)
4244 * behavior inside LWLockRelease.
4245 */
4246 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4247 LWLockRelease(LockHashPartitionLockByIndex(i));
4248
4249 *nlocks = index;
4250 return accessExclusiveLocks;
4251}
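/*
 * Usage sketch (editorial note, not part of lock.c): the caller (e.g.
 * LogStandbySnapshot()) receives a palloc'd array plus its length:
 *
 *	int			nlocks;
 *	xl_standby_lock *locks = GetRunningTransactionLocks(&nlocks);
 *
 *	for (int n = 0; n < nlocks; n++)
 *		(record locks[n].xid, locks[n].dbOid and locks[n].relOid in WAL)
 *	pfree(locks);
 */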
4252
4253/* Provide the textual name of any lock mode */
4254const char *
4255GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4256{
4257 Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4258 Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4259 return LockMethods[lockmethodid]->lockModeNames[mode];
4260}
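/*
 * Example (editorial note, not part of lock.c):
 * GetLockmodeName(DEFAULT_LOCKMETHOD, AccessShareLock) returns the string
 * "AccessShareLock".
 */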
4261
4262#ifdef LOCK_DEBUG
4263/*
4264 * Dump all locks in the given proc's myProcLocks lists.
4265 *
4266 * Caller is responsible for having acquired appropriate LWLocks.
4267 */
4268void
4269DumpLocks(PGPROC *proc)
4270{
4271 int i;
4272
4273 if (proc == NULL)
4274 return;
4275
4276 if (proc->waitLock)
4277 LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4278
4279 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4280 {
4281 dlist_head *procLocks = &proc->myProcLocks[i];
4282 dlist_iter iter;
4283
4284 dlist_foreach(iter, procLocks)
4285 {
4286 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4287 LOCK *lock = proclock->tag.myLock;
4288
4289 Assert(proclock->tag.myProc == proc);
4290 PROCLOCK_PRINT("DumpLocks", proclock);
4291 LOCK_PRINT("DumpLocks", lock, 0);
4292 }
4293 }
4294}
4295
4296/*
4297 * Dump all lmgr locks.
4298 *
4299 * Caller is responsible for having acquired appropriate LWLocks.
4300 */
4301void
4302DumpAllLocks(void)
4303{
4304 PGPROC *proc;
4305 PROCLOCK *proclock;
4306 LOCK *lock;
4307 HASH_SEQ_STATUS status;
4308
4309 proc = MyProc;
4310
4311 if (proc && proc->waitLock)
4312 LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4313
4314 hash_seq_init(&status, LockMethodProcLockHash);
4315
4316 while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4317 {
4318 PROCLOCK_PRINT("DumpAllLocks", proclock);
4319
4320 lock = proclock->tag.myLock;
4321 if (lock)
4322 LOCK_PRINT("DumpAllLocks", lock, 0);
4323 else
4324 elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4325 }
4326}
4327#endif /* LOCK_DEBUG */
4328
4329/*
4330 * LOCK 2PC resource manager's routines
4331 */
4332
4333/*
4334 * Re-acquire a lock belonging to a transaction that was prepared.
4335 *
4336 * Because this function is run at db startup, re-acquiring the locks should
4337 * never conflict with running transactions because there are none. We
4338 * assume that the lock state represented by the stored 2PC files is legal.
4339 *
4340 * When switching from Hot Standby mode to normal operation, the locks will
4341 * be already held by the startup process. The locks are acquired for the new
4342 * procs without checking for conflicts, so we don't get a conflict between the
4343 * startup process and the dummy procs, even though we will momentarily have
4344 * a situation where two procs are holding the same AccessExclusiveLock,
4345 * which isn't normally possible because of the conflict. If we're in standby
4346 * mode, but a recovery snapshot hasn't been established yet, it's possible
4347 * that some but not all of the locks are already held by the startup process.
4348 *
4349 * This approach is simple, but also a bit dangerous, because if there isn't
4350 * enough shared memory to acquire the locks, an error will be thrown, which
4351 * is promoted to FATAL and recovery will abort, bringing down postmaster.
4352 * A safer approach would be to transfer the locks like we do in
4353 * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4354 * read-only backends to use up all the shared lock memory anyway, so that
4355 * replaying the WAL record that needs to acquire a lock will throw an error
4356 * and PANIC anyway.
4357 */
4358void
4359lock_twophase_recover(FullTransactionId fxid, uint16 info,
4360 void *recdata, uint32 len)
4361{
4362 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4363 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4364 LOCKTAG *locktag;
4365 LOCKMODE lockmode;
4366 LOCKMETHODID lockmethodid;
4367 LOCK *lock;
4368 PROCLOCK *proclock;
4369 PROCLOCKTAG proclocktag;
4370 bool found;
4371 uint32 hashcode;
4372 uint32 proclock_hashcode;
4373 int partition;
4374 LWLock *partitionLock;
4375 LockMethod lockMethodTable;
4376
4377 Assert(len == sizeof(TwoPhaseLockRecord));
4378 locktag = &rec->locktag;
4379 lockmode = rec->lockmode;
4380 lockmethodid = locktag->locktag_lockmethodid;
4381
4382 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4383 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4384 lockMethodTable = LockMethods[lockmethodid];
4385
4386 hashcode = LockTagHashCode(locktag);
4387 partition = LockHashPartition(hashcode);
4388 partitionLock = LockHashPartitionLock(hashcode);
4389
4390 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4391
4392 /*
4393 * Find or create a lock with this tag.
4394 */
4395 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4396 locktag,
4397 hashcode,
4398 HASH_ENTER_NULL,
4399 &found);
4400 if (!lock)
4401 {
4402 LWLockRelease(partitionLock);
4403 ereport(ERROR,
4404 (errcode(ERRCODE_OUT_OF_MEMORY),
4405 errmsg("out of shared memory"),
4406 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4407 }
4408
4409 /*
4410 * if it's a new lock object, initialize it
4411 */
4412 if (!found)
4413 {
4414 lock->grantMask = 0;
4415 lock->waitMask = 0;
4416 dlist_init(&lock->procLocks);
4417 dclist_init(&lock->waitProcs);
4418 lock->nRequested = 0;
4419 lock->nGranted = 0;
4420 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4421 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4422 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4423 }
4424 else
4425 {
4426 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4427 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4428 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4429 Assert(lock->nGranted <= lock->nRequested);
4430 }
4431
4432 /*
4433 * Create the hash key for the proclock table.
4434 */
4435 proclocktag.myLock = lock;
4436 proclocktag.myProc = proc;
4437
4438 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4439
4440 /*
4441 * Find or create a proclock entry with this tag
4442 */
4443 proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4444 &proclocktag,
4445 proclock_hashcode,
4446 HASH_ENTER_NULL,
4447 &found);
4448 if (!proclock)
4449 {
4450 /* Oops, not enough shmem for the proclock */
4451 if (lock->nRequested == 0)
4452 {
4453 /*
4454 * There are no other requestors of this lock, so garbage-collect
4455 * the lock object. We *must* do this to avoid a permanent leak
4456 * of shared memory, because there won't be anything to cause
4457 * anyone to release the lock object later.
4458 */
4459 Assert(dlist_is_empty(&lock->procLocks));
4460 if (!hash_search_with_hash_value(LockMethodLockHash,
4461 &(lock->tag),
4462 hashcode,
4463 HASH_REMOVE,
4464 NULL))
4465 elog(PANIC, "lock table corrupted");
4466 }
4467 LWLockRelease(partitionLock);
4468 ereport(ERROR,
4469 (errcode(ERRCODE_OUT_OF_MEMORY),
4470 errmsg("out of shared memory"),
4471 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4472 }
4473
4474 /*
4475 * If new, initialize the new entry
4476 */
4477 if (!found)
4478 {
4479 Assert(proc->lockGroupLeader == NULL);
4480 proclock->groupLeader = proc;
4481 proclock->holdMask = 0;
4482 proclock->releaseMask = 0;
4483 /* Add proclock to appropriate lists */
4484 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4485 dlist_push_tail(&proc->myProcLocks[partition],
4486 &proclock->procLink);
4487 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4488 }
4489 else
4490 {
4491 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4492 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4493 }
4494
4495 /*
4496 * lock->nRequested and lock->requested[] count the total number of
4497 * requests, whether granted or waiting, so increment those immediately.
4498 */
4499 lock->nRequested++;
4500 lock->requested[lockmode]++;
4501 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4502
4503 /*
4504 * We shouldn't already hold the desired lock.
4505 */
4506 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4507 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4508 lockMethodTable->lockModeNames[lockmode],
4509 lock->tag.locktag_field1, lock->tag.locktag_field2,
4510 lock->tag.locktag_field3);
4511
4512 /*
4513 * We ignore any possible conflicts and just grant ourselves the lock. Not
4514 * only because we don't bother, but also to avoid deadlocks when
4515 * switching from standby to normal mode. See function comment.
4516 */
4517 GrantLock(lock, proclock, lockmode);
4518
4519 /*
4520 * Bump strong lock count, to make sure any fast-path lock requests won't
4521 * be granted without consulting the primary lock table.
4522 */
4523 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4524 {
4525 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4526
4527 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4528 FastPathStrongRelationLocks->count[fasthashcode]++;
4529 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4530 }
4531
4532 LWLockRelease(partitionLock);
4533}
4534
4535/*
4536 * Re-acquire a lock belonging to a transaction that was prepared, when
4537 * starting up into hot standby mode.
4538 */
4539void
4540lock_twophase_standby_recover(FullTransactionId fxid, uint16 info,
4541 void *recdata, uint32 len)
4542{
4543 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4544 LOCKTAG *locktag;
4545 LOCKMODE lockmode;
4546 LOCKMETHODID lockmethodid;
4547
4548 Assert(len == sizeof(TwoPhaseLockRecord));
4549 locktag = &rec->locktag;
4550 lockmode = rec->lockmode;
4551 lockmethodid = locktag->locktag_lockmethodid;
4552
4553 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4554 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4555
4556 if (lockmode == AccessExclusiveLock &&
4557 locktag->locktag_type == LOCKTAG_RELATION)
4558 {
4559 StandbyAcquireAccessExclusiveLock(XidFromFullTransactionId(fxid),
4560 locktag->locktag_field1 /* dboid */ ,
4561 locktag->locktag_field2 /* reloid */ );
4562 }
4563}
4564
4565
4566/*
4567 * 2PC processing routine for COMMIT PREPARED case.
4568 *
4569 * Find and release the lock indicated by the 2PC record.
4570 */
4571void
4572lock_twophase_postcommit(FullTransactionId fxid, uint16 info,
4573 void *recdata, uint32 len)
4574{
4575 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4576 PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4577 LOCKTAG *locktag;
4578 LOCKMETHODID lockmethodid;
4579 LockMethod lockMethodTable;
4580
4581 Assert(len == sizeof(TwoPhaseLockRecord));
4582 locktag = &rec->locktag;
4583 lockmethodid = locktag->locktag_lockmethodid;
4584
4585 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4586 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4587 lockMethodTable = LockMethods[lockmethodid];
4588
4589 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4590}
4591
4592/*
4593 * 2PC processing routine for ROLLBACK PREPARED case.
4594 *
4595 * This is actually just the same as the COMMIT case.
4596 */
4597void
4598lock_twophase_postabort(FullTransactionId fxid, uint16 info,
4599 void *recdata, uint32 len)
4600{
4601 lock_twophase_postcommit(fxid, info, recdata, len);
4602}
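/*
 * Editorial note (not part of lock.c): the four lock_twophase_* callbacks
 * above are reached through the TWOPHASE_RM_LOCK_ID slot of the two-phase
 * resource-manager callback tables (twophase_rmgr.c), once for every
 * TwoPhaseLockRecord that AtPrepare_Locks() registered with
 * RegisterTwoPhaseRecord() for the prepared transaction.
 */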
4603
4604/*
4605 * VirtualXactLockTableInsert
4606 *
4607 * Take vxid lock via the fast-path. There can't be any pre-existing
4608 * lockers, as we haven't advertised this vxid via the ProcArray yet.
4609 *
4610 * Since MyProc->fpLocalTransactionId will normally contain the same data
4611 * as MyProc->vxid.lxid, you might wonder if we really need both. The
4612 * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4613 * examined by procarray.c, while fpLocalTransactionId is protected by
4614 * fpInfoLock and is used only by the locking subsystem. Doing it this
4615 * way makes it easier to verify that there are no funny race conditions.
4616 *
4617 * We don't bother recording this lock in the local lock table, since it's
4618 * only ever released at the end of a transaction. Instead,
4619 * LockReleaseAll() calls VirtualXactLockTableCleanup().
4620 */
4621void
4622VirtualXactLockTableInsert(VirtualTransactionId vxid)
4623{
4624 Assert(VirtualTransactionIdIsValid(vxid));
4625
4626 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4627
4628 Assert(MyProc->vxid.procNumber == vxid.procNumber);
4629 Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4630 Assert(MyProc->fpVXIDLock == false);
4631
4632 MyProc->fpVXIDLock = true;
4633 MyProc->fpLocalTransactionId = vxid.localTransactionId;
4634
4635 LWLockRelease(&MyProc->fpInfoLock);
4636}
4637
4638/*
4639 * VirtualXactLockTableCleanup
4640 *
4641 * Check whether a VXID lock has been materialized; if so, release it,
4642 * unblocking waiters.
4643 */
4644void
4645VirtualXactLockTableCleanup(void)
4646{
4647 bool fastpath;
4648 LocalTransactionId lxid;
4649
4650 Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4651
4652 /*
4653 * Clean up shared memory state.
4654 */
4655 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4656
4657 fastpath = MyProc->fpVXIDLock;
4658 lxid = MyProc->fpLocalTransactionId;
4659 MyProc->fpVXIDLock = false;
4660 MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4661
4662 LWLockRelease(&MyProc->fpInfoLock);
4663
4664 /*
4665 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4666 * that means someone transferred the lock to the main lock table.
4667 */
4668 if (!fastpath && LocalTransactionIdIsValid(lxid))
4669 {
4670 VirtualTransactionId vxid;
4671 LOCKTAG locktag;
4672
4673 vxid.procNumber = MyProcNumber;
4674 vxid.localTransactionId = lxid;
4675 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4676
4677 LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4678 &locktag, ExclusiveLock, false);
4679 }
4680}
4681
4682/*
4683 * XactLockForVirtualXact
4684 *
4685 * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4686 * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4687 * functions, it assumes "xid" is never a subtransaction and that "xid" is
4688 * prepared, committed, or aborted.
4689 *
4690 * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4691 * known as "vxid" before its PREPARE TRANSACTION.
4692 */
4693static bool
4694XactLockForVirtualXact(VirtualTransactionId vxid,
4695 TransactionId xid, bool wait)
4696{
4697 bool more = false;
4698
4699 /* There is no point to wait for 2PCs if you have no 2PCs. */
4700 if (max_prepared_xacts == 0)
4701 return true;
4702
4703 do
4704 {
4705 LockAcquireResult lar;
4706 LOCKTAG tag;
4707
4708 /* Clear state from previous iterations. */
4709 if (more)
4710 {
4711 xid = InvalidTransactionId;
4712 more = false;
4713 }
4714
4715 /* If we have no xid, try to find one. */
4716 if (!TransactionIdIsValid(xid))
4717 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4718 if (!TransactionIdIsValid(xid))
4719 {
4720 Assert(!more);
4721 return true;
4722 }
4723
4724 /* Check or wait for XID completion. */
4725 SET_LOCKTAG_TRANSACTION(tag, xid);
4726 lar = LockAcquire(&tag, ShareLock, false, !wait);
4727 if (lar == LOCKACQUIRE_NOT_AVAIL)
4728 return false;
4729 LockRelease(&tag, ShareLock, false);
4730 } while (more);
4731
4732 return true;
4733}
4734
4735/*
4736 * VirtualXactLock
4737 *
4738 * If wait = true, wait as long as the given VXID or any XID acquired by the
4739 * same transaction is still running. Then, return true.
4740 *
4741 * If wait = false, just check whether that VXID or one of those XIDs is still
4742 * running, and return true or false.
4743 */
4744bool
4745VirtualXactLock(VirtualTransactionId vxid, bool wait)
4746{
4747 LOCKTAG tag;
4748 PGPROC *proc;
4749 TransactionId xid = InvalidTransactionId;
4750
4751 Assert(VirtualTransactionIdIsValid(vxid));
4752
4753 if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4754 /* no vxid lock; localTransactionId is a normal, locked XID */
4755 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4756
4757 SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4758
4758
4759 /*
4760 * If a lock table entry must be made, this is the PGPROC on whose behalf
4761 * it must be done. Note that the transaction might end or the PGPROC
4762 * might be reassigned to a new backend before we get around to examining
4763 * it, but it doesn't matter. If we find upon examination that the
4764 * relevant lxid is no longer running here, that's enough to prove that
4765 * it's no longer running anywhere.
4766 */
4767 proc = ProcNumberGetProc(vxid.procNumber);
4768 if (proc == NULL)
4769 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4770
4771 /*
4772 * We must acquire this lock before checking the procNumber and lxid
4773 * against the ones we're waiting for. The target backend will only set
4774 * or clear lxid while holding this lock.
4775 */
4776 LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
4777
4778 if (proc->vxid.procNumber != vxid.procNumber
4779 || proc->vxid.lxid != vxid.localTransactionId)
4780 {
4781 /* VXID ended */
4782 LWLockRelease(&proc->fpInfoLock);
4783 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4784 }
4785
4786 /*
4787 * If we aren't asked to wait, there's no need to set up a lock table
4788 * entry. The transaction is still in progress, so just return false.
4789 */
4790 if (!wait)
4791 {
4792 LWLockRelease(&proc->fpInfoLock);
4793 return false;
4794 }
4795
4796 /*
4797 * OK, we're going to need to sleep on the VXID. But first, we must set
4798 * up the primary lock table entry, if needed (ie, convert the proc's
4799 * fast-path lock on its VXID to a regular lock).
4800 */
4801 if (proc->fpVXIDLock)
4802 {
4803 PROCLOCK *proclock;
4804 uint32 hashcode;
4805 LWLock *partitionLock;
4806
4807 hashcode = LockTagHashCode(&tag);
4808
4809 partitionLock = LockHashPartitionLock(hashcode);
4810 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4811
4812 proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4813 &tag, hashcode, ExclusiveLock);
4814 if (!proclock)
4815 {
4816 LWLockRelease(partitionLock);
4817 LWLockRelease(&proc->fpInfoLock);
4818 ereport(ERROR,
4819 (errcode(ERRCODE_OUT_OF_MEMORY),
4820 errmsg("out of shared memory"),
4821 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4822 }
4823 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4824
4825 LWLockRelease(partitionLock);
4826
4827 proc->fpVXIDLock = false;
4828 }
4829
4830 /*
4831 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4832 * search. The proc might have assigned this XID but not yet locked it,
4833 * in which case the proc will lock this XID before releasing the VXID.
4834 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4835 * so we won't save an XID of a different VXID. It doesn't matter whether
4836 * we save this before or after setting up the primary lock table entry.
4837 */
4838 xid = proc->xid;
4839
4840 /* Done with proc->fpLockBits */
4841 LWLockRelease(&proc->fpInfoLock);
4842
4843 /* Time to wait. */
4844 (void) LockAcquire(&tag, ShareLock, false, false);
4845
4846 LockRelease(&tag, ShareLock, false);
4847 return XactLockForVirtualXact(vxid, xid, wait);
4848}
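/*
 * Usage sketch (editorial note, not part of lock.c): callers typically find
 * the conflicting VXIDs first and then wait on each of them, e.g. (with a
 * hypothetical locktag "tag"):
 *
 *	int			nconflicts;
 *	VirtualTransactionId *vxids =
 *		GetLockConflicts(&tag, AccessExclusiveLock, &nconflicts);
 *
 *	for (int n = 0; n < nconflicts; n++)
 *		(void) VirtualXactLock(vxids[n], true);
 */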
4849
4850/*
4851 * LockWaiterCount
4852 *
4853 * Find the number of lock requesters on this locktag
4854 */
4855int
4856LockWaiterCount(const LOCKTAG *locktag)
4857{
4858 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4859 LOCK *lock;
4860 bool found;
4861 uint32 hashcode;
4862 LWLock *partitionLock;
4863 int waiters = 0;
4864
4865 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4866 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4867
4868 hashcode = LockTagHashCode(locktag);
4869 partitionLock = LockHashPartitionLock(hashcode);
4870 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4871
4872 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4873 locktag,
4874 hashcode,
4875 HASH_FIND,
4876 &found);
4877 if (found)
4878 {
4879 Assert(lock != NULL);
4880 waiters = lock->nRequested;
4881 }
4882 LWLockRelease(partitionLock);
4883
4884 return waiters;
4885}
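/*
 * Editorial note (not part of lock.c): the value returned is lock->nRequested,
 * which counts granted holders as well as sleeping waiters, so it is really
 * the total number of requesters.  A typical use is scaling bulk relation
 * extension by the amount of contention, e.g. via a wrapper such as
 * RelationExtensionLockWaiterCount() in lmgr.c.
 */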