Thanks to visit codestin.com
Credit goes to doxygen.postgresql.org

PostgreSQL Source Code git master
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_REL_GROUP(rel)    (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
 
#define FAST_PATH_SLOT(group, index)
 
#define FAST_PATH_GROUP(index)
 
#define FAST_PATH_INDEX(index)
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_BITS(proc, n)   (proc)->fpLockBits[FAST_PATH_GROUP(n)]
 
#define FAST_PATH_GET_BITS(proc, n)    ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static ProcWaitStatus WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void waitonlock_error_callback (void *arg)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void LockManagerShmemInit (void)
 
void InitLockManagerAccess (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
LOCALLOCK * GetAwaitedLock (void)
 
void ResetAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (FullTransactionId fxid)
 
Size LockManagerShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
bool log_lock_failures = false
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCounts [FP_LOCK_GROUPS_PER_BACKEND_MAX]
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
int FastPathLockGroupsPerBackend = 0
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
(mode) > ShareUpdateExclusiveLock)
#define DEFAULT_LOCKMETHOD
Definition: lock.h:127
@ LOCKTAG_RELATION
Definition: lock.h:139
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
static PgChecksumMode mode
Definition: pg_checksums.c:55
#define InvalidOid
Definition: postgres_ext.h:37

Definition at line 273 of file lock.c.

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
MyDatabaseId != InvalidOid && \
(mode) < ShareUpdateExclusiveLock)
Oid MyDatabaseId
Definition: globals.c:94

Definition at line 267 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
  l 
)
Value:
(AssertMacro((n) < FastPathLockSlotsPerBackend()), \
AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
AssertMacro((l) < FAST_PATH_BITS_PER_SLOT + FAST_PATH_LOCKNUMBER_OFFSET), \
((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
#define AssertMacro(condition)
Definition: c.h:859
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition: lock.c:242
#define FAST_PATH_INDEX(index)
Definition: lock.c:236
#define FAST_PATH_BITS_PER_SLOT
Definition: lock.c:241
#define FastPathLockSlotsPerBackend()
Definition: proc.h:93

Definition at line 247 of file lock.c.

◆ FAST_PATH_BITS

#define FAST_PATH_BITS (   proc,
  n 
)    (proc)->fpLockBits[FAST_PATH_GROUP(n)]

Definition at line 244 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 241 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
  l 
)     (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 256 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
  l 
)     FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 254 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
  n 
)     ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)

Definition at line 245 of file lock.c.

◆ FAST_PATH_GROUP

#define FAST_PATH_GROUP (   index)
Value:
uint32_t uint32
Definition: c.h:539
#define FP_LOCK_SLOTS_PER_GROUP
Definition: proc.h:92
Definition: type.h:96

Definition at line 233 of file lock.c.

◆ FAST_PATH_INDEX

#define FAST_PATH_INDEX (   index)
Value:

Definition at line 236 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 242 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 243 of file lock.c.

◆ FAST_PATH_REL_GROUP

#define FAST_PATH_REL_GROUP (   rel)     (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))

Definition at line 217 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
  l 
)     FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 252 of file lock.c.

◆ FAST_PATH_SLOT

#define FAST_PATH_SLOT (   group,
  index 
)
Value:
(AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
int FastPathLockGroupsPerBackend
Definition: lock.c:202

Definition at line 224 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 300 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 301 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 303 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 405 of file lock.c.

◆ NLOCKENTS

Definition at line 56 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 406 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1860 of file lock.c.

1861{
1862 uint32 fasthashcode;
1863 LOCALLOCK *locallock = StrongLockInProgress;
1864
1865 if (locallock == NULL)
1866 return;
1867
1868 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1869 Assert(locallock->holdsStrongLockCount == true);
1871 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1872 FastPathStrongRelationLocks->count[fasthashcode]--;
1873 locallock->holdsStrongLockCount = false;
1874 StrongLockInProgress = NULL;
1876}
Assert(PointerIsAligned(start, uint64))
#define FastPathStrongLockHashPartition(hashcode)
Definition: lock.c:303
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition: lock.c:312
static LOCALLOCK * StrongLockInProgress
Definition: lock.c:327
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition: lock.c:309
uint32 hashcode
Definition: lock.h:434
bool holdsStrongLockCount
Definition: lock.h:441

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3478 of file lock.c.

3479{
3480 HASH_SEQ_STATUS status;
3481 LOCALLOCK *locallock;
3482
3483 /* First, verify there aren't locks of both xact and session level */
3485
3486 /* Now do the per-locallock cleanup work */
3488
3489 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3490 {
3491 TwoPhaseLockRecord record;
3492 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3493 bool haveSessionLock;
3494 bool haveXactLock;
3495 int i;
3496
3497 /*
3498 * Ignore VXID locks. We don't want those to be held by prepared
3499 * transactions, since they aren't meaningful after a restart.
3500 */
3502 continue;
3503
3504 /* Ignore it if we don't actually hold the lock */
3505 if (locallock->nLocks <= 0)
3506 continue;
3507
3508 /* Scan to see whether we hold it at session or transaction level */
3509 haveSessionLock = haveXactLock = false;
3510 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3511 {
3512 if (lockOwners[i].owner == NULL)
3513 haveSessionLock = true;
3514 else
3515 haveXactLock = true;
3516 }
3517
3518 /* Ignore it if we have only session lock */
3519 if (!haveXactLock)
3520 continue;
3521
3522 /* This can't happen, because we already checked it */
3523 if (haveSessionLock)
3524 ereport(ERROR,
3525 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3526 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3527
3528 /*
3529 * If the local lock was taken via the fast-path, we need to move it
3530 * to the primary lock table, or just get a pointer to the existing
3531 * primary lock table entry if by chance it's already been
3532 * transferred.
3533 */
3534 if (locallock->proclock == NULL)
3535 {
3536 locallock->proclock = FastPathGetRelationLockEntry(locallock);
3537 locallock->lock = locallock->proclock->tag.myLock;
3538 }
3539
3540 /*
3541 * Arrange to not release any strong lock count held by this lock
3542 * entry. We must retain the count until the prepared transaction is
3543 * committed or rolled back.
3544 */
3545 locallock->holdsStrongLockCount = false;
3546
3547 /*
3548 * Create a 2PC record.
3549 */
3550 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3551 record.lockmode = locallock->tag.mode;
3552
3554 &record, sizeof(TwoPhaseLockRecord));
3555 }
3556}
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1415
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1380
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:150
int i
Definition: isn.c:77
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition: lock.c:2958
static HTAB * LockMethodLocalHash
Definition: lock.c:323
static void CheckForSessionAndXactLocks(void)
Definition: lock.c:3390
@ LOCKTAG_VIRTUALTRANSACTION
Definition: lock.h:145
LOCKTAG lock
Definition: lock.h:412
LOCKMODE mode
Definition: lock.h:413
LOCALLOCKOWNER * lockOwners
Definition: lock.h:440
LOCK * lock
Definition: lock.h:435
int64 nLocks
Definition: lock.h:437
int numLockOwners
Definition: lock.h:438
PROCLOCK * proclock
Definition: lock.h:436
LOCALLOCKTAG tag
Definition: lock.h:431
Definition: lock.h:167
uint8 locktag_type
Definition: lock.h:172
LOCK * myLock
Definition: lock.h:367
PROCLOCKTAG tag
Definition: lock.h:374
LOCKTAG locktag
Definition: lock.c:160
LOCKMODE lockmode
Definition: lock.c:161
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition: twophase.c:1271
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:27

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1824 of file lock.c.

1825{
1827 Assert(locallock->holdsStrongLockCount == false);
1828
1829 /*
1830 * Adding to a memory location is not atomic, so we take a spinlock to
1831 * ensure we don't collide with someone else trying to bump the count at
1832 * the same time.
1833 *
1834 * XXX: It might be worth considering using an atomic fetch-and-add
1835 * instruction here, on architectures where that is supported.
1836 */
1837
1839 FastPathStrongRelationLocks->count[fasthashcode]++;
1840 locallock->holdsStrongLockCount = true;
1841 StrongLockInProgress = locallock;
1843}

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1464 of file lock.c.

1465{
1466#ifdef USE_ASSERT_CHECKING
1467 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1468 IsRelationExtensionLockHeld = acquired;
1469#endif
1470}
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:140
#define LOCALLOCK_LOCKTAG(llock)
Definition: lock.h:446

References LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3390 of file lock.c.

3391{
3392 typedef struct
3393 {
3394 LOCKTAG lock; /* identifies the lockable object */
3395 bool sessLock; /* is any lockmode held at session level? */
3396 bool xactLock; /* is any lockmode held at xact level? */
3397 } PerLockTagEntry;
3398
3399 HASHCTL hash_ctl;
3400 HTAB *lockhtab;
3401 HASH_SEQ_STATUS status;
3402 LOCALLOCK *locallock;
3403
3404 /* Create a local hash table keyed by LOCKTAG only */
3405 hash_ctl.keysize = sizeof(LOCKTAG);
3406 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3407 hash_ctl.hcxt = CurrentMemoryContext;
3408
3409 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3410 256, /* arbitrary initial size */
3411 &hash_ctl,
3413
3414 /* Scan local lock table to find entries for each LOCKTAG */
3416
3417 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3418 {
3419 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3420 PerLockTagEntry *hentry;
3421 bool found;
3422 int i;
3423
3424 /*
3425 * Ignore VXID locks. We don't want those to be held by prepared
3426 * transactions, since they aren't meaningful after a restart.
3427 */
3429 continue;
3430
3431 /* Ignore it if we don't actually hold the lock */
3432 if (locallock->nLocks <= 0)
3433 continue;
3434
3435 /* Otherwise, find or make an entry in lockhtab */
3436 hentry = (PerLockTagEntry *) hash_search(lockhtab,
3437 &locallock->tag.lock,
3438 HASH_ENTER, &found);
3439 if (!found) /* initialize, if newly created */
3440 hentry->sessLock = hentry->xactLock = false;
3441
3442 /* Scan to see if we hold lock at session or xact level or both */
3443 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3444 {
3445 if (lockOwners[i].owner == NULL)
3446 hentry->sessLock = true;
3447 else
3448 hentry->xactLock = true;
3449 }
3450
3451 /*
3452 * We can throw error immediately when we see both types of locks; no
3453 * need to wait around to see if there are more violations.
3454 */
3455 if (hentry->sessLock && hentry->xactLock)
3456 ereport(ERROR,
3457 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3458 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3459 }
3460
3461 /* Success, so clean up */
3462 hash_destroy(lockhtab);
3463}
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:952
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:358
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:865
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
struct LOCKTAG LOCKTAG
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:222

References CurrentMemoryContext, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), HASHCTL::hcxt, i, HASHCTL::keysize, LOCALLOCKTAG::lock, LockMethodLocalHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1738 of file lock.c.

1741{
1742 /*
1743 * If this was my last hold on this lock, delete my entry in the proclock
1744 * table.
1745 */
1746 if (proclock->holdMask == 0)
1747 {
1748 uint32 proclock_hashcode;
1749
1750 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1751 dlist_delete(&proclock->lockLink);
1752 dlist_delete(&proclock->procLink);
1753 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1755 &(proclock->tag),
1756 proclock_hashcode,
1758 NULL))
1759 elog(PANIC, "proclock table corrupted");
1760 }
1761
1762 if (lock->nRequested == 0)
1763 {
1764 /*
1765 * The caller just released the last lock, so garbage-collect the lock
1766 * object.
1767 */
1768 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1771 &(lock->tag),
1772 hashcode,
1774 NULL))
1775 elog(PANIC, "lock table corrupted");
1776 }
1777 else if (wakeupNeeded)
1778 {
1779 /* There are waiters on this lock, so wake them up. */
1780 ProcLockWakeup(lockMethodTable, lock);
1781 }
1782}
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:965
#define PANIC
Definition: elog.h:42
#define elog(elevel,...)
Definition: elog.h:226
@ HASH_REMOVE
Definition: hsearch.h:115
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:405
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:605
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:406
static HTAB * LockMethodLockHash
Definition: lock.c:321
static HTAB * LockMethodProcLockHash
Definition: lock.c:322
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1739
int nRequested
Definition: lock.h:321
LOCKTAG tag
Definition: lock.h:313
dlist_head procLocks
Definition: lock.h:318
LOCKMASK holdMask
Definition: lock.h:378
dlist_node lockLink
Definition: lock.h:380
dlist_node procLink
Definition: lock.h:381

References Assert(), dlist_delete(), dlist_is_empty(), elog, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 623 of file lock.c.

624{
625 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
626
627 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
628 return true;
629
630 return false;
631}
static const LockMethod LockMethods[]
Definition: lock.c:150
#define LOCKBIT_ON(lockmode)
Definition: lock.h:86
const LOCKMASK * conflictTab
Definition: lock.h:113

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2958 of file lock.c.

2959{
2960 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2961 LOCKTAG *locktag = &locallock->tag.lock;
2962 PROCLOCK *proclock = NULL;
2963 LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2964 Oid relid = locktag->locktag_field2;
2965 uint32 i,
2966 group;
2967
2968 /* fast-path group the lock belongs to */
2969 group = FAST_PATH_REL_GROUP(relid);
2970
2972
2973 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2974 {
2975 uint32 lockmode;
2976
2977 /* index into the whole per-backend array */
2978 uint32 f = FAST_PATH_SLOT(group, i);
2979
2980 /* Look for an allocated slot matching the given relid. */
2981 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2982 continue;
2983
2984 /* If we don't have a lock of the given mode, forget it! */
2985 lockmode = locallock->tag.mode;
2986 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2987 break;
2988
2989 /* Find or create lock object. */
2990 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2991
2992 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2993 locallock->hashcode, lockmode);
2994 if (!proclock)
2995 {
2996 LWLockRelease(partitionLock);
2998 ereport(ERROR,
2999 (errcode(ERRCODE_OUT_OF_MEMORY),
3000 errmsg("out of shared memory"),
3001 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3002 }
3003 GrantLock(proclock->tag.myLock, proclock, lockmode);
3004 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3005
3006 LWLockRelease(partitionLock);
3007
3008 /* No need to examine remaining slots. */
3009 break;
3010 }
3011
3013
3014 /* Lock may have already been transferred by some other backend. */
3015 if (proclock == NULL)
3016 {
3017 LOCK *lock;
3018 PROCLOCKTAG proclocktag;
3019 uint32 proclock_hashcode;
3020
3021 LWLockAcquire(partitionLock, LW_SHARED);
3022
3024 locktag,
3025 locallock->hashcode,
3026 HASH_FIND,
3027 NULL);
3028 if (!lock)
3029 elog(ERROR, "failed to re-find shared lock object");
3030
3031 proclocktag.myLock = lock;
3032 proclocktag.myProc = MyProc;
3033
3034 proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
3035 proclock = (PROCLOCK *)
3037 &proclocktag,
3038 proclock_hashcode,
3039 HASH_FIND,
3040 NULL);
3041 if (!proclock)
3042 elog(ERROR, "failed to re-find shared proclock object");
3043 LWLockRelease(partitionLock);
3044 }
3045
3046 return proclock;
3047}
int errhint(const char *fmt,...)
Definition: elog.c:1321
@ HASH_FIND
Definition: hsearch.h:113
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1283
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition: lock.c:256
#define FAST_PATH_REL_GROUP(rel)
Definition: lock.c:217
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1658
#define FAST_PATH_SLOT(group, index)
Definition: lock.c:224
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition: lock.c:254
#define FAST_PATH_GET_BITS(proc, n)
Definition: lock.c:245
#define LockHashPartitionLock(hashcode)
Definition: lock.h:528
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1174
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1894
@ LW_SHARED
Definition: lwlock.h:113
@ LW_EXCLUSIVE
Definition: lwlock.h:112
unsigned int Oid
Definition: postgres_ext.h:32
PGPROC * MyProc
Definition: proc.c:66
uint32 locktag_field2
Definition: lock.h:169
Definition: lock.h:311
Definition: lwlock.h:42
LWLock fpInfoLock
Definition: proc.h:310
Oid * fpRelId
Definition: proc.h:312
PGPROC * myProc
Definition: lock.h:368
Definition: lock.h:372

References DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), LOCALLOCK::hashcode, i, LOCALLOCKTAG::lock, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, ProcLockHashCode(), SetupLockInTable(), PROCLOCK::tag, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2782 of file lock.c.

2783{
2784 uint32 i;
2785 uint32 unused_slot = FastPathLockSlotsPerBackend();
2786
2787 /* fast-path group the lock belongs to */
2788 uint32 group = FAST_PATH_REL_GROUP(relid);
2789
2790 /* Scan for existing entry for this relid, remembering empty slot. */
2791 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2792 {
2793 /* index into the whole per-backend array */
2794 uint32 f = FAST_PATH_SLOT(group, i);
2795
2796 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2797 unused_slot = f;
2798 else if (MyProc->fpRelId[f] == relid)
2799 {
2800 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2801 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2802 return true;
2803 }
2804 }
2805
2806 /* If no existing entry, use any empty slot. */
2807 if (unused_slot < FastPathLockSlotsPerBackend())
2808 {
2809 MyProc->fpRelId[unused_slot] = relid;
2810 FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2811 ++FastPathLocalUseCounts[group];
2812 return true;
2813 }
2814
2815 /* No existing entry, and no empty slot. */
2816 return false;
2817}
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition: lock.c:252
static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
Definition: lock.c:176

References Assert(), FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SET_LOCKMODE, FAST_PATH_SLOT, FastPathLocalUseCounts, FastPathLockSlotsPerBackend, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2861 of file lock.c.

2863{
2864 LWLock *partitionLock = LockHashPartitionLock(hashcode);
2865 Oid relid = locktag->locktag_field2;
2866 uint32 i;
2867
2868 /* fast-path group the lock belongs to */
2869 uint32 group = FAST_PATH_REL_GROUP(relid);
2870
2871 /*
2872 * Every PGPROC that can potentially hold a fast-path lock is present in
2873 * ProcGlobal->allProcs. Prepared transactions are not, but any
2874 * outstanding fast-path locks held by prepared transactions are
2875 * transferred to the main lock table.
2876 */
2877 for (i = 0; i < ProcGlobal->allProcCount; i++)
2878 {
2879 PGPROC *proc = &ProcGlobal->allProcs[i];
2880 uint32 j;
2881
2883
2884 /*
2885 * If the target backend isn't referencing the same database as the
2886 * lock, then we needn't examine the individual relation IDs at all;
2887 * none of them can be relevant.
2888 *
2889 * proc->databaseId is set at backend startup time and never changes
2890 * thereafter, so it might be safe to perform this test before
2891 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2892 * assume that if the target backend holds any fast-path locks, it
2893 * must have performed a memory-fencing operation (in particular, an
2894 * LWLock acquisition) since setting proc->databaseId. However, it's
2895 * less clear that our backend is certain to have performed a memory
2896 * fencing operation since the other backend set proc->databaseId. So
2897 * for now, we test it after acquiring the LWLock just to be safe.
2898 *
2899 * Also skip groups without any registered fast-path locks.
2900 */
2901 if (proc->databaseId != locktag->locktag_field1 ||
2902 proc->fpLockBits[group] == 0)
2903 {
2904 LWLockRelease(&proc->fpInfoLock);
2905 continue;
2906 }
2907
2908 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2909 {
2910 uint32 lockmode;
2911
2912 /* index into the whole per-backend array */
2913 uint32 f = FAST_PATH_SLOT(group, j);
2914
2915 /* Look for an allocated slot matching the given relid. */
2916 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2917 continue;
2918
2919 /* Find or create lock object. */
2920 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2921 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2923 ++lockmode)
2924 {
2925 PROCLOCK *proclock;
2926
2927 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2928 continue;
2929 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2930 hashcode, lockmode);
2931 if (!proclock)
2932 {
2933 LWLockRelease(partitionLock);
2934 LWLockRelease(&proc->fpInfoLock);
2935 return false;
2936 }
2937 GrantLock(proclock->tag.myLock, proclock, lockmode);
2938 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2939 }
2940 LWLockRelease(partitionLock);
2941
2942 /* No need to examine remaining slots. */
2943 break;
2944 }
2945 LWLockRelease(&proc->fpInfoLock);
2946 }
2947 return true;
2948}
int j
Definition: isn.c:78
PROC_HDR * ProcGlobal
Definition: proc.c:78
uint32 locktag_field1
Definition: lock.h:168
Definition: proc.h:179
Oid databaseId
Definition: proc.h:224
uint64 * fpLockBits
Definition: proc.h:311
PGPROC * allProcs
Definition: proc.h:388
uint32 allProcCount
Definition: proc.h:406

References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GrantLock(), i, j, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2825 of file lock.c.

2826{
2827 uint32 i;
2828 bool result = false;
2829
2830 /* fast-path group the lock belongs to */
2831 uint32 group = FAST_PATH_REL_GROUP(relid);
2832
2833 FastPathLocalUseCounts[group] = 0;
2834 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2835 {
2836 /* index into the whole per-backend array */
2837 uint32 f = FAST_PATH_SLOT(group, i);
2838
2839 if (MyProc->fpRelId[f] == relid
2840 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2841 {
2842 Assert(!result);
2843 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2844 result = true;
2845 /* we continue iterating so as to update FastPathLocalUseCount */
2846 }
2847 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2848 ++FastPathLocalUseCounts[group];
2849 }
2850 return result;
2851}

References Assert(), FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FastPathLocalUseCounts, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1850 of file lock.c.

1851{
1852 StrongLockInProgress = NULL;
1853}

References StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetAwaitedLock()

LOCALLOCK * GetAwaitedLock ( void  )

Definition at line 1898 of file lock.c.

1899{
1900 return awaitedLock;
1901}
static LOCALLOCK * awaitedLock
Definition: lock.c:328

References awaitedLock.

Referenced by LockErrorCleanup(), ProcessRecoveryConflictInterrupt(), and ProcSleep().

◆ GetBlockerStatusData()

BlockedProcsData * GetBlockerStatusData ( int  blocked_pid)

Definition at line 3998 of file lock.c.

3999{
4001 PGPROC *proc;
4002 int i;
4003
4005
4006 /*
4007 * Guess how much space we'll need, and preallocate. Most of the time
4008 * this will avoid needing to do repalloc while holding the LWLocks. (We
4009 * assume, but check with an Assert, that MaxBackends is enough entries
4010 * for the procs[] array; the other two could need enlargement, though.)
4011 */
4012 data->nprocs = data->nlocks = data->npids = 0;
4013 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
4014 data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
4015 data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
4016 data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
4017
4018 /*
4019 * In order to search the ProcArray for blocked_pid and assume that that
4020 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4021 * In addition, to examine the lock grouping fields of any other backend,
4022 * we must hold all the hash partition locks. (Only one of those locks is
4023 * actually relevant for any one lock group, but we can't know which one
4024 * ahead of time.) It's fairly annoying to hold all those locks
4025 * throughout this, but it's no worse than GetLockStatusData(), and it
4026 * does have the advantage that we're guaranteed to return a
4027 * self-consistent instantaneous state.
4028 */
4029 LWLockAcquire(ProcArrayLock, LW_SHARED);
4030
4031 proc = BackendPidGetProcWithLock(blocked_pid);
4032
4033 /* Nothing to do if it's gone */
4034 if (proc != NULL)
4035 {
4036 /*
4037 * Acquire lock on the entire shared lock data structure. See notes
4038 * in GetLockStatusData().
4039 */
4040 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4042
4043 if (proc->lockGroupLeader == NULL)
4044 {
4045 /* Easy case, proc is not a lock group member */
4047 }
4048 else
4049 {
4050 /* Examine all procs in proc's lock group */
4051 dlist_iter iter;
4052
4054 {
4055 PGPROC *memberProc;
4056
4057 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4059 }
4060 }
4061
4062 /*
4063 * And release locks. See notes in GetLockStatusData().
4064 */
4065 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4067
4068 Assert(data->nprocs <= data->maxprocs);
4069 }
4070
4071 LWLockRelease(ProcArrayLock);
4072
4073 return data;
4074}
int MaxBackends
Definition: globals.c:146
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition: lock.c:4078
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:531
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:95
void * palloc(Size size)
Definition: mcxt.c:1365
const void * data
PGPROC * BackendPidGetProcWithLock(int pid)
Definition: procarray.c:3181
dlist_head lockGroupMembers
Definition: proc.h:322
PGPROC * lockGroupLeader
Definition: proc.h:321
dlist_node * cur
Definition: ilist.h:179

References Assert(), BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId * GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 3069 of file lock.c.

3070{
3071 static VirtualTransactionId *vxids;
3072 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
3073 LockMethod lockMethodTable;
3074 LOCK *lock;
3075 LOCKMASK conflictMask;
3076 dlist_iter proclock_iter;
3077 PROCLOCK *proclock;
3078 uint32 hashcode;
3079 LWLock *partitionLock;
3080 int count = 0;
3081 int fast_count = 0;
3082
3083 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
3084 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3085 lockMethodTable = LockMethods[lockmethodid];
3086 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
3087 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3088
3089 /*
3090 * Allocate memory to store results, and fill with InvalidVXID. We only
3091 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3092 * InHotStandby allocate once in TopMemoryContext.
3093 */
3094 if (InHotStandby)
3095 {
3096 if (vxids == NULL)
3097 vxids = (VirtualTransactionId *)
3099 sizeof(VirtualTransactionId) *
3101 }
3102 else
3103 vxids = (VirtualTransactionId *)
3106
3107 /* Compute hash code and partition lock, and look up conflicting modes. */
3108 hashcode = LockTagHashCode(locktag);
3109 partitionLock = LockHashPartitionLock(hashcode);
3110 conflictMask = lockMethodTable->conflictTab[lockmode];
3111
3112 /*
3113 * Fast path locks might not have been entered in the primary lock table.
3114 * If the lock we're dealing with could conflict with such a lock, we must
3115 * examine each backend's fast-path array for conflicts.
3116 */
3117 if (ConflictsWithRelationFastPath(locktag, lockmode))
3118 {
3119 int i;
3120 Oid relid = locktag->locktag_field2;
3122
3123 /* fast-path group the lock belongs to */
3124 uint32 group = FAST_PATH_REL_GROUP(relid);
3125
3126 /*
3127 * Iterate over relevant PGPROCs. Anything held by a prepared
3128 * transaction will have been transferred to the primary lock table,
3129 * so we need not worry about those. This is all a bit fuzzy, because
3130 * new locks could be taken after we've visited a particular
3131 * partition, but the callers had better be prepared to deal with that
3132 * anyway, since the locks could equally well be taken between the
3133 * time we return the value and the time the caller does something
3134 * with it.
3135 */
3136 for (i = 0; i < ProcGlobal->allProcCount; i++)
3137 {
3138 PGPROC *proc = &ProcGlobal->allProcs[i];
3139 uint32 j;
3140
3141 /* A backend never blocks itself */
3142 if (proc == MyProc)
3143 continue;
3144
3146
3147 /*
3148 * If the target backend isn't referencing the same database as
3149 * the lock, then we needn't examine the individual relation IDs
3150 * at all; none of them can be relevant.
3151 *
3152 * See FastPathTransferRelationLocks() for discussion of why we do
3153 * this test after acquiring the lock.
3154 *
3155 * Also skip groups without any registered fast-path locks.
3156 */
3157 if (proc->databaseId != locktag->locktag_field1 ||
3158 proc->fpLockBits[group] == 0)
3159 {
3160 LWLockRelease(&proc->fpInfoLock);
3161 continue;
3162 }
3163
3164 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3165 {
3166 uint32 lockmask;
3167
3168 /* index into the whole per-backend array */
3169 uint32 f = FAST_PATH_SLOT(group, j);
3170
3171 /* Look for an allocated slot matching the given relid. */
3172 if (relid != proc->fpRelId[f])
3173 continue;
3174 lockmask = FAST_PATH_GET_BITS(proc, f);
3175 if (!lockmask)
3176 continue;
3177 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3178
3179 /*
3180 * There can only be one entry per relation, so if we found it
3181 * and it doesn't conflict, we can skip the rest of the slots.
3182 */
3183 if ((lockmask & conflictMask) == 0)
3184 break;
3185
3186 /* Conflict! */
3187 GET_VXID_FROM_PGPROC(vxid, *proc);
3188
3190 vxids[count++] = vxid;
3191 /* else, xact already committed or aborted */
3192
3193 /* No need to examine remaining slots. */
3194 break;
3195 }
3196
3197 LWLockRelease(&proc->fpInfoLock);
3198 }
3199 }
3200
3201 /* Remember how many fast-path conflicts we found. */
3202 fast_count = count;
3203
3204 /*
3205 * Look up the lock object matching the tag.
3206 */
3207 LWLockAcquire(partitionLock, LW_SHARED);
3208
3210 locktag,
3211 hashcode,
3212 HASH_FIND,
3213 NULL);
3214 if (!lock)
3215 {
3216 /*
3217 * If the lock object doesn't exist, there is nothing holding a lock
3218 * on this lockable object.
3219 */
3220 LWLockRelease(partitionLock);
3221 vxids[count].procNumber = INVALID_PROC_NUMBER;
3223 if (countp)
3224 *countp = count;
3225 return vxids;
3226 }
3227
3228 /*
3229 * Examine each existing holder (or awaiter) of the lock.
3230 */
3231 dlist_foreach(proclock_iter, &lock->procLocks)
3232 {
3233 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3234
3235 if (conflictMask & proclock->holdMask)
3236 {
3237 PGPROC *proc = proclock->tag.myProc;
3238
3239 /* A backend never blocks itself */
3240 if (proc != MyProc)
3241 {
3243
3244 GET_VXID_FROM_PGPROC(vxid, *proc);
3245
3247 {
3248 int i;
3249
3250 /* Avoid duplicate entries. */
3251 for (i = 0; i < fast_count; ++i)
3252 if (VirtualTransactionIdEquals(vxids[i], vxid))
3253 break;
3254 if (i >= fast_count)
3255 vxids[count++] = vxid;
3256 }
3257 /* else, xact already committed or aborted */
3258 }
3259 }
3260 }
3261
3262 LWLockRelease(partitionLock);
3263
3264 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3265 elog(PANIC, "too many conflicting locks found");
3266
3267 vxids[count].procNumber = INVALID_PROC_NUMBER;
3269 if (countp)
3270 *countp = count;
3271 return vxids;
3272}
#define lengthof(array)
Definition: c.h:788
#define ConflictsWithRelationFastPath(locktag, mode)
Definition: lock.c:273
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:557
uint16 LOCKMETHODID
Definition: lock.h:124
#define VirtualTransactionIdIsValid(vxid)
Definition: lock.h:69
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition: lock.h:79
#define InvalidLocalTransactionId
Definition: lock.h:67
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition: lock.h:73
int LOCKMASK
Definition: lockdefs.h:25
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1229
void * palloc0(Size size)
Definition: mcxt.c:1395
MemoryContext TopMemoryContext
Definition: mcxt.c:166
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
uint8 locktag_lockmethodid
Definition: lock.h:173
int numLockModes
Definition: lock.h:112
LocalTransactionId localTransactionId
Definition: lock.h:64
ProcNumber procNumber
Definition: lock.h:63
int max_prepared_xacts
Definition: twophase.c:116
#define InHotStandby
Definition: xlogutils.h:60

References PROC_HDR::allProcCount, PROC_HDR::allProcs, ConflictsWithRelationFastPath, LockMethodData::conflictTab, dlist_iter::cur, PGPROC::databaseId, dlist_container, dlist_foreach, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, INVALID_PROC_NUMBER, InvalidLocalTransactionId, j, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, VirtualTransactionId::procNumber, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char * GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 4255 of file lock.c.

4256{
4257 Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4258 Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4259 return LockMethods[lockmethodid]->lockModeNames[mode];
4260}
const char *const * lockModeNames
Definition: lock.h:114

References Assert(), lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by DeadLockReport(), LockAcquireExtended(), overexplain_range_table(), pg_lock_status(), ProcSleep(), and waitonlock_error_callback().

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 527 of file lock.c.

528{
529 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
530
531 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
532 return LockMethods[lockmethodid];
533}
#define LOCK_LOCKMETHOD(lock)
Definition: lock.h:326

References Assert(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData * GetLockStatusData ( void  )

Definition at line 3795 of file lock.c.

3796{
3797 LockData *data;
3798 PROCLOCK *proclock;
3799 HASH_SEQ_STATUS seqstat;
3800 int els;
3801 int el;
3802 int i;
3803
3804 data = (LockData *) palloc(sizeof(LockData));
3805
3806 /* Guess how much space we'll need. */
3807 els = MaxBackends;
3808 el = 0;
3809 data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3810
3811 /*
3812 * First, we iterate through the per-backend fast-path arrays, locking
3813 * them one at a time. This might produce an inconsistent picture of the
3814 * system state, but taking all of those LWLocks at the same time seems
3815 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3816 * matter too much, because none of these locks can be involved in lock
3817 * conflicts anyway - anything that might must be present in the main lock
3818 * table. (For the same reason, we don't sweat about making leaderPid
3819 * completely valid. We cannot safely dereference another backend's
3820 * lockGroupLeader field without holding all lock partition locks, and
3821 * it's not worth that.)
3822 */
3823 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3824 {
3825 PGPROC *proc = &ProcGlobal->allProcs[i];
3826
3827 /* Skip backends with pid=0, as they don't hold fast-path locks */
3828 if (proc->pid == 0)
3829 continue;
3830
3832
3833 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3834 {
3835 /* Skip groups without registered fast-path locks */
3836 if (proc->fpLockBits[g] == 0)
3837 continue;
3838
3839 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3840 {
3841 LockInstanceData *instance;
3842 uint32 f = FAST_PATH_SLOT(g, j);
3843 uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3844
3845 /* Skip unallocated slots */
3846 if (!lockbits)
3847 continue;
3848
3849 if (el >= els)
3850 {
3851 els += MaxBackends;
3852 data->locks = (LockInstanceData *)
3853 repalloc(data->locks, sizeof(LockInstanceData) * els);
3854 }
3855
3856 instance = &data->locks[el];
3857 SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3858 proc->fpRelId[f]);
3859 instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3860 instance->waitLockMode = NoLock;
3861 instance->vxid.procNumber = proc->vxid.procNumber;
3862 instance->vxid.localTransactionId = proc->vxid.lxid;
3863 instance->pid = proc->pid;
3864 instance->leaderPid = proc->pid;
3865 instance->fastpath = true;
3866
3867 /*
3868 * Successfully taking fast path lock means there were no
3869 * conflicting locks.
3870 */
3871 instance->waitStart = 0;
3872
3873 el++;
3874 }
3875 }
3876
3877 if (proc->fpVXIDLock)
3878 {
3880 LockInstanceData *instance;
3881
3882 if (el >= els)
3883 {
3884 els += MaxBackends;
3885 data->locks = (LockInstanceData *)
3886 repalloc(data->locks, sizeof(LockInstanceData) * els);
3887 }
3888
3889 vxid.procNumber = proc->vxid.procNumber;
3891
3892 instance = &data->locks[el];
3893 SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3894 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3895 instance->waitLockMode = NoLock;
3896 instance->vxid.procNumber = proc->vxid.procNumber;
3897 instance->vxid.localTransactionId = proc->vxid.lxid;
3898 instance->pid = proc->pid;
3899 instance->leaderPid = proc->pid;
3900 instance->fastpath = true;
3901 instance->waitStart = 0;
3902
3903 el++;
3904 }
3905
3906 LWLockRelease(&proc->fpInfoLock);
3907 }
3908
3909 /*
3910 * Next, acquire lock on the entire shared lock data structure. We do
3911 * this so that, at least for locks in the primary lock table, the state
3912 * will be self-consistent.
3913 *
3914 * Since this is a read-only operation, we take shared instead of
3915 * exclusive lock. There's not a whole lot of point to this, because all
3916 * the normal operations require exclusive lock, but it doesn't hurt
3917 * anything either. It will at least allow two backends to do
3918 * GetLockStatusData in parallel.
3919 *
3920 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3921 */
3922 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3924
3925 /* Now we can safely count the number of proclocks */
3927 if (data->nelements > els)
3928 {
3929 els = data->nelements;
3930 data->locks = (LockInstanceData *)
3931 repalloc(data->locks, sizeof(LockInstanceData) * els);
3932 }
3933
3934 /* Now scan the tables to copy the data */
3936
3937 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3938 {
3939 PGPROC *proc = proclock->tag.myProc;
3940 LOCK *lock = proclock->tag.myLock;
3941 LockInstanceData *instance = &data->locks[el];
3942
3943 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3944 instance->holdMask = proclock->holdMask;
3945 if (proc->waitLock == proclock->tag.myLock)
3946 instance->waitLockMode = proc->waitLockMode;
3947 else
3948 instance->waitLockMode = NoLock;
3949 instance->vxid.procNumber = proc->vxid.procNumber;
3950 instance->vxid.localTransactionId = proc->vxid.lxid;
3951 instance->pid = proc->pid;
3952 instance->leaderPid = proclock->groupLeader->pid;
3953 instance->fastpath = false;
3954 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3955
3956 el++;
3957 }
3958
3959 /*
3960 * And release locks. We do this in reverse order for two reasons: (1)
3961 * Anyone else who needs more than one of the locks will be trying to lock
3962 * them in increasing order; we don't want to release the other process
3963 * until it can get all the locks it needs. (2) This avoids O(N^2)
3964 * behavior inside LWLockRelease.
3965 */
3966 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3968
3969 Assert(el == data->nelements);
3970
3971 return data;
3972}
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:465
int64 TimestampTz
Definition: timestamp.h:39
int64 hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1336
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition: lock.h:237
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition: lock.h:183
#define NoLock
Definition: lockdefs.h:34
#define ExclusiveLock
Definition: lockdefs.h:42
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1610
Definition: lock.h:468
LOCKMASK holdMask
Definition: lock.h:457
LOCKMODE waitLockMode
Definition: lock.h:458
bool fastpath
Definition: lock.h:464
LOCKTAG locktag
Definition: lock.h:456
TimestampTz waitStart
Definition: lock.h:460
int leaderPid
Definition: lock.h:463
VirtualTransactionId vxid
Definition: lock.h:459
LocalTransactionId lxid
Definition: proc.h:217
struct PGPROC::@128 vxid
pg_atomic_uint64 waitStart
Definition: proc.h:254
bool fpVXIDLock
Definition: proc.h:313
ProcNumber procNumber
Definition: proc.h:212
int pid
Definition: proc.h:199
LOCK * waitLock
Definition: proc.h:249
LOCKMODE waitLockMode
Definition: proc.h:251
LocalTransactionId fpLocalTransactionId
Definition: proc.h:314
PGPROC * groupLeader
Definition: lock.h:377

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert(), data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_SLOT, LockInstanceData::fastpath, FastPathLockGroupsPerBackend, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpLockBits, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, j, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 539 of file lock.c.

540{
541 LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
542
543 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
544 return LockMethods[lockmethodid];
545}

References Assert(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock * GetRunningTransactionLocks ( int *  nlocks)

Definition at line 4173 of file lock.c.

4174{
4175 xl_standby_lock *accessExclusiveLocks;
4176 PROCLOCK *proclock;
4177 HASH_SEQ_STATUS seqstat;
4178 int i;
4179 int index;
4180 int els;
4181
4182 /*
4183 * Acquire lock on the entire shared lock data structure.
4184 *
4185 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4186 */
4187 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4189
4190 /* Now we can safely count the number of proclocks */
4192
4193 /*
4194 * Allocating enough space for all locks in the lock table is overkill,
4195 * but it's more convenient and faster than having to enlarge the array.
4196 */
4197 accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4198
4199 /* Now scan the tables to copy the data */
4201
4202 /*
4203 * If lock is a currently granted AccessExclusiveLock then it will have
4204 * just one proclock holder, so locks are never accessed twice in this
4205 * particular case. Don't copy this code for use elsewhere because in the
4206 * general case this will give you duplicate locks when looking at
4207 * non-exclusive lock types.
4208 */
4209 index = 0;
4210 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4211 {
4212 /* make sure this definition matches the one used in LockAcquire */
4213 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4215 {
4216 PGPROC *proc = proclock->tag.myProc;
4217 LOCK *lock = proclock->tag.myLock;
4218 TransactionId xid = proc->xid;
4219
4220 /*
4221 * Don't record locks for transactions if we know they have
4222 * already issued their WAL record for commit but not yet released
4223 * lock. It is still possible that we see locks held by already
4224 * complete transactions, if they haven't yet zeroed their xids.
4225 */
4226 if (!TransactionIdIsValid(xid))
4227 continue;
4228
4229 accessExclusiveLocks[index].xid = xid;
4230 accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4231 accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4232
4233 index++;
4234 }
4235 }
4236
4237 Assert(index <= els);
4238
4239 /*
4240 * And release locks. We do this in reverse order for two reasons: (1)
4241 * Anyone else who needs more than one of the locks will be trying to lock
4242 * them in increasing order; we don't want to release the other process
4243 * until it can get all the locks it needs. (2) This avoids O(N^2)
4244 * behavior inside LWLockRelease.
4245 */
4246 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4248
4249 *nlocks = index;
4250 return accessExclusiveLocks;
4251}
uint32 TransactionId
Definition: c.h:658
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:189
TransactionId xid
Definition: lockdefs.h:53
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert(), xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 4078 of file lock.c.

4079{
4080 LOCK *theLock = blocked_proc->waitLock;
4081 BlockedProcData *bproc;
4082 dlist_iter proclock_iter;
4083 dlist_iter proc_iter;
4084 dclist_head *waitQueue;
4085 int queue_size;
4086
4087 /* Nothing to do if this proc is not blocked */
4088 if (theLock == NULL)
4089 return;
4090
4091 /* Set up a procs[] element */
4092 bproc = &data->procs[data->nprocs++];
4093 bproc->pid = blocked_proc->pid;
4094 bproc->first_lock = data->nlocks;
4095 bproc->first_waiter = data->npids;
4096
4097 /*
4098 * We may ignore the proc's fast-path arrays, since nothing in those could
4099 * be related to a contended lock.
4100 */
4101
4102 /* Collect all PROCLOCKs associated with theLock */
4103 dlist_foreach(proclock_iter, &theLock->procLocks)
4104 {
4105 PROCLOCK *proclock =
4106 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4107 PGPROC *proc = proclock->tag.myProc;
4108 LOCK *lock = proclock->tag.myLock;
4109 LockInstanceData *instance;
4110
4111 if (data->nlocks >= data->maxlocks)
4112 {
4113 data->maxlocks += MaxBackends;
4114 data->locks = (LockInstanceData *)
4115 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4116 }
4117
4118 instance = &data->locks[data->nlocks];
4119 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4120 instance->holdMask = proclock->holdMask;
4121 if (proc->waitLock == lock)
4122 instance->waitLockMode = proc->waitLockMode;
4123 else
4124 instance->waitLockMode = NoLock;
4125 instance->vxid.procNumber = proc->vxid.procNumber;
4126 instance->vxid.localTransactionId = proc->vxid.lxid;
4127 instance->pid = proc->pid;
4128 instance->leaderPid = proclock->groupLeader->pid;
4129 instance->fastpath = false;
4130 data->nlocks++;
4131 }
4132
4133 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4134 waitQueue = &(theLock->waitProcs);
4135 queue_size = dclist_count(waitQueue);
4136
4137 if (queue_size > data->maxpids - data->npids)
4138 {
4139 data->maxpids = Max(data->maxpids + MaxBackends,
4140 data->npids + queue_size);
4141 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4142 sizeof(int) * data->maxpids);
4143 }
4144
4145 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4146 dclist_foreach(proc_iter, waitQueue)
4147 {
4148 PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4149
4150 if (queued_proc == blocked_proc)
4151 break;
4152 data->waiter_pids[data->npids++] = queued_proc->pid;
4153 queued_proc = (PGPROC *) queued_proc->links.next;
4154 }
4155
4156 bproc->num_locks = data->nlocks - bproc->first_lock;
4157 bproc->num_waiters = data->npids - bproc->first_waiter;
4158}
#define Max(x, y)
Definition: c.h:998
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
int first_lock
Definition: lock.h:478
int first_waiter
Definition: lock.h:482
int num_waiters
Definition: lock.h:483
int num_locks
Definition: lock.h:479
dclist_head waitProcs
Definition: lock.h:319
dlist_node links
Definition: proc.h:180
dlist_node * next
Definition: ilist.h:140
static struct link * links
Definition: zic.c:299

References dlist_iter::cur, data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, LockInstanceData::fastpath, BlockedProcData::first_lock, BlockedProcData::first_waiter, PROCLOCK::groupLeader, PROCLOCK::holdMask, LockInstanceData::holdMask, LockInstanceData::leaderPid, PGPROC::links, links, VirtualTransactionId::localTransactionId, LockInstanceData::locktag, PGPROC::lxid, Max, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, dlist_node::next, NoLock, BlockedProcData::num_locks, BlockedProcData::num_waiters, LockInstanceData::pid, BlockedProcData::pid, PGPROC::pid, LOCK::procLocks, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, and LOCK::waitProcs.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1889 of file lock.c.

/*
 * Grant the lock that this backend was waiting for, once its wait ends.
 * NOTE(review): the single body line (1891) is elided by this hyperlinked
 * rendering; the cross-reference list for this entry shows it calls
 * GrantLockLocal() with the awaitedLock/awaitedOwner statics -- confirm
 * against the real lock.c before relying on this.
 */
1890{
1892}
static ResourceOwner awaitedOwner
Definition: lock.c:329
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1792

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1658 of file lock.c.

/*
 * Update the shared LOCK and PROCLOCK structures to show that "lockmode"
 * has been granted: bump the granted counters, set the grant bit on the
 * lock, and record the held mode in the proclock.  The wait bit for the
 * mode is cleared once every request for that mode has been granted.
 * NOTE(review): visible callers (e.g. LockAcquireExtended in this file)
 * invoke this while holding the lock's partition LWLock exclusively.
 */
1659{
1660 lock->nGranted++;
1661 lock->granted[lockmode]++;
1662 lock->grantMask |= LOCKBIT_ON(lockmode);
/* all outstanding requests for this mode are now granted, so no waiters */
1663 if (lock->granted[lockmode] == lock->requested[lockmode])
1664 lock->waitMask &= LOCKBIT_OFF(lockmode);
1665 proclock->holdMask |= LOCKBIT_ON(lockmode);
1666 LOCK_PRINT("GrantLock", lock, lockmode);
1667 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1668 Assert(lock->nGranted <= lock->nRequested);
1669}
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:87
int requested[MAX_LOCKMODES]
Definition: lock.h:320
int granted[MAX_LOCKMODES]
Definition: lock.h:322
LOCKMASK grantMask
Definition: lock.h:316
LOCKMASK waitMask
Definition: lock.h:317
int nGranted
Definition: lock.h:323

References Assert(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), JoinWaitQueue(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1792 of file lock.c.

/*
 * Record one more acquisition of "locallock" on behalf of "owner" in this
 * backend's local bookkeeping.  Bumps the total nLocks count, then either
 * bumps the matching per-owner count or appends a new lockOwners[] entry.
 * A NULL owner denotes a session lock (LockAcquireExtended passes NULL
 * when sessionLock is true); only non-NULL owners are registered with the
 * resource-owner machinery.  Caller must have ensured there is room for a
 * new entry -- the Assert below checks numLockOwners < maxLockOwners.
 */
1793{
1794 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1795 int i;
1796
1797 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1798 /* Count the total */
1799 locallock->nLocks++;
1800 /* Count the per-owner lock */
1801 for (i = 0; i < locallock->numLockOwners; i++)
1802 {
1803 if (lockOwners[i].owner == owner)
1804 {
1805 lockOwners[i].nLocks++;
1806 return;
1807 }
1808 }
/* owner not seen before: append a new entry (i == numLockOwners here) */
1809 lockOwners[i].owner = owner;
1810 lockOwners[i].nLocks = 1;
1811 locallock->numLockOwners++;
1812 if (owner != NULL)
1813 ResourceOwnerRememberLock(owner, locallock);
1814
1815 /* Indicate that the lock is acquired for certain types of locks. */
1816 CheckAndSetLockHeld(locallock, true);
1817}
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition: lock.c:1464
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1059
int64 nLocks
Definition: lock.h:425
struct ResourceOwnerData * owner
Definition: lock.h:424
int maxLockOwners
Definition: lock.h:439

References Assert(), CheckAndSetLockHeld(), i, LOCALLOCK::lockOwners, LOCALLOCK::maxLockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLockManagerAccess()

void InitLockManagerAccess ( void  )

Definition at line 505 of file lock.c.

/*
 * Backend-local initialization for the lock manager: create the private
 * hash table mapping LOCALLOCKTAG -> LOCALLOCK, which tracks how many
 * times this backend holds each lock and on behalf of which resource
 * owners.
 * NOTE(review): line 519 (the hash_create flags argument) is elided by
 * this rendering; the cross-reference list for this entry shows HASH_ELEM
 * and HASH_BLOBS -- confirm against the real lock.c.
 */
506{
507 /*
508 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
509 * counts and resource owner information.
510 */
511 HASHCTL info;
512
513 info.keysize = sizeof(LOCALLOCKTAG);
514 info.entrysize = sizeof(LOCALLOCK);
515
516 LockMethodLocalHash = hash_create("LOCALLOCK hash",
517 16,
518 &info,
520}
struct LOCALLOCK LOCALLOCK
struct LOCALLOCKTAG LOCALLOCKTAG

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, and LockMethodLocalHash.

Referenced by BaseInit().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( FullTransactionId  fxid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4598 of file lock.c.

/*
 * 2PC processing routine for ROLLBACK PREPARED: locks held by a prepared
 * transaction are released the same way at abort as at commit, so this
 * simply delegates to lock_twophase_postcommit().
 */
4600{
4601 lock_twophase_postcommit(fxid, info, recdata, len);
4602}
void lock_twophase_postcommit(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4572
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( FullTransactionId  fxid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4572 of file lock.c.

/*
 * 2PC processing routine for COMMIT PREPARED: release the lock described
 * by one TwoPhaseLockRecord, held by the dummy PGPROC standing in for
 * prepared transaction "fxid".  The final "true" argument asks
 * LockRefindAndRelease() to also decrement the strong-lock count.
 */
4574{
4575 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4576 PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4577 LOCKTAG *locktag;
4578 LOCKMETHODID lockmethodid;
4579 LockMethod lockMethodTable;
4580
4581 Assert(len == sizeof(TwoPhaseLockRecord));
4582 locktag = &rec->locktag;
4583 lockmethodid = locktag->locktag_lockmethodid;
4584
/* reject a corrupt lock method id before indexing LockMethods[] */
4585 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4586 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4587 lockMethodTable = LockMethods[lockmethodid];
4588
4589 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4590}
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3286
PGPROC * TwoPhaseGetDummyProc(FullTransactionId fxid, bool lock_held)
Definition: twophase.c:923

References Assert(), elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( FullTransactionId  fxid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4359 of file lock.c.

/*
 * 2PC processing routine at server restart: re-create in the shared lock
 * table the lock described by one TwoPhaseLockRecord and grant it to the
 * dummy PGPROC for prepared transaction "fxid", deliberately ignoring
 * conflicts (see the in-line comment about standby-to-normal switching).
 * Errors out if shared memory is exhausted, and PANICs on lock-table
 * corruption during cleanup.  Bumps the strong-lock counter afterwards so
 * fast-path acquisitions cannot bypass the re-created lock.
 * NOTE(review): this is a doxygen rendering; hyperlinked source lines
 * (e.g. 4395, 4398, 4443, 4446, 4459-4460, 4463, 4527, 4529) are elided
 * below -- consult the real lock.c for the full statements.
 */
4361{
4362 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4363 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4364 LOCKTAG *locktag;
4365 LOCKMODE lockmode;
4366 LOCKMETHODID lockmethodid;
4367 LOCK *lock;
4368 PROCLOCK *proclock;
4369 PROCLOCKTAG proclocktag;
4370 bool found;
4371 uint32 hashcode;
4372 uint32 proclock_hashcode;
4373 int partition;
4374 LWLock *partitionLock;
4375 LockMethod lockMethodTable;
4376
4377 Assert(len == sizeof(TwoPhaseLockRecord));
4378 locktag = &rec->locktag;
4379 lockmode = rec->lockmode;
4380 lockmethodid = locktag->locktag_lockmethodid;
4381
4382 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4383 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4384 lockMethodTable = LockMethods[lockmethodid];
4385
4386 hashcode = LockTagHashCode(locktag);
4387 partition = LockHashPartition(hashcode);
4388 partitionLock = LockHashPartitionLock(hashcode);
4389
4390 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4391
4392 /*
4393 * Find or create a lock with this tag.
4394 */
/* NOTE(review): elided line 4395 is the hash_search_with_hash_value()
 * call head on LockMethodLockHash (per the reference list) */
4396 locktag,
4397 hashcode,
4399 &found);
4400 if (!lock)
4401 {
4402 LWLockRelease(partitionLock);
4403 ereport(ERROR,
4404 (errcode(ERRCODE_OUT_OF_MEMORY),
4405 errmsg("out of shared memory"),
4406 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4407 }
4408
4409 /*
4410 * if it's a new lock object, initialize it
4411 */
4412 if (!found)
4413 {
4414 lock->grantMask = 0;
4415 lock->waitMask = 0;
4416 dlist_init(&lock->procLocks);
4417 dclist_init(&lock->waitProcs);
4418 lock->nRequested = 0;
4419 lock->nGranted = 0;
4420 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4421 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4422 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4423 }
4424 else
4425 {
4426 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4427 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4428 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4429 Assert(lock->nGranted <= lock->nRequested);
4430 }
4431
4432 /*
4433 * Create the hash key for the proclock table.
4434 */
4435 proclocktag.myLock = lock;
4436 proclocktag.myProc = proc;
4437
4438 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4439
4440 /*
4441 * Find or create a proclock entry with this tag
4442 */
/* NOTE(review): elided line 4443 is the hash_search_with_hash_value()
 * call head on LockMethodProcLockHash (per the reference list) */
4444 &proclocktag,
4445 proclock_hashcode,
4447 &found);
4448 if (!proclock)
4449 {
4450 /* Oops, not enough shmem for the proclock */
4451 if (lock->nRequested == 0)
4452 {
4453 /*
4454 * There are no other requestors of this lock, so garbage-collect
4455 * the lock object. We *must* do this to avoid a permanent leak
4456 * of shared memory, because there won't be anything to cause
4457 * anyone to release the lock object later.
4458 */
4461 &(lock->tag),
4462 hashcode,
4464 NULL))
4465 elog(PANIC, "lock table corrupted");
4466 }
4467 LWLockRelease(partitionLock);
4468 ereport(ERROR,
4469 (errcode(ERRCODE_OUT_OF_MEMORY),
4470 errmsg("out of shared memory"),
4471 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4472 }
4473
4474 /*
4475 * If new, initialize the new entry
4476 */
4477 if (!found)
4478 {
4479 Assert(proc->lockGroupLeader == NULL);
4480 proclock->groupLeader = proc;
4481 proclock->holdMask = 0;
4482 proclock->releaseMask = 0;
4483 /* Add proclock to appropriate lists */
4484 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4485 dlist_push_tail(&proc->myProcLocks[partition],
4486 &proclock->procLink);
4487 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4488 }
4489 else
4490 {
4491 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4492 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4493 }
4494
4495 /*
4496 * lock->nRequested and lock->requested[] count the total number of
4497 * requests, whether granted or waiting, so increment those immediately.
4498 */
4499 lock->nRequested++;
4500 lock->requested[lockmode]++;
4501 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4502
4503 /*
4504 * We shouldn't already hold the desired lock.
4505 */
4506 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4507 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4508 lockMethodTable->lockModeNames[lockmode],
4509 lock->tag.locktag_field1, lock->tag.locktag_field2,
4510 lock->tag.locktag_field3);
4511
4512 /*
4513 * We ignore any possible conflicts and just grant ourselves the lock. Not
4514 * only because we don't bother, but also to avoid deadlocks when
4515 * switching from standby to normal mode. See function comment.
4516 */
4517 GrantLock(lock, proclock, lockmode);
4518
4519 /*
4520 * Bump strong lock count, to make sure any fast-path lock requests won't
4521 * be granted without consulting the primary lock table.
4522 */
4523 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4524 {
4525 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4526
/* NOTE(review): elided lines 4527/4529 are the SpinLockAcquire/
 * SpinLockRelease pair on FastPathStrongRelationLocks->mutex (per the
 * reference list) */
4528 FastPathStrongRelationLocks->count[fasthashcode]++;
4530 }
4531
4532 LWLockRelease(partitionLock);
4533}
#define MemSet(start, val, len)
Definition: c.h:1020
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
#define MAX_LOCKMODES
Definition: lock.h:84
#define LockHashPartition(hashcode)
Definition: lock.h:526
int LOCKMODE
Definition: lockdefs.h:26
uint32 locktag_field3
Definition: lock.h:170
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:278
LOCKMASK releaseMask
Definition: lock.h:379

References Assert(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( FullTransactionId  fxid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4540 of file lock.c.

/*
 * 2PC processing routine during standby startup: re-acquire only the
 * AccessExclusiveLocks on relations recorded for the prepared
 * transaction; records for any other lock mode or tag type are ignored
 * here.
 */
4542{
4543 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4544 LOCKTAG *locktag;
4545 LOCKMODE lockmode;
4546 LOCKMETHODID lockmethodid;
4547
4548 Assert(len == sizeof(TwoPhaseLockRecord));
4549 locktag = &rec->locktag;
4550 lockmode = rec->lockmode;
4551 lockmethodid = locktag->locktag_lockmethodid;
4552
4553 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4554 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4555
4556 if (lockmode == AccessExclusiveLock &&
4557 locktag->locktag_type == LOCKTAG_RELATION)
4558 {
/* NOTE(review): elided line 4559 is the StandbyAcquireAccessExclusiveLock()
 * call head with the xid argument (XidFromFullTransactionId per the
 * reference list) */
4560 locktag->locktag_field1 /* dboid */ ,
4561 locktag->locktag_field2 /* reloid */ );
4562 }
4563}
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:986
#define XidFromFullTransactionId(x)
Definition: transam.h:48

References AccessExclusiveLock, Assert(), elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, StandbyAcquireAccessExclusiveLock(), and XidFromFullTransactionId.

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp,
bool  logLockFailure 
)

Definition at line 836 of file lock.c.

/*
 * LockAcquireExtended -- acquire (or fail to acquire) a heavyweight lock,
 * with full control over error reporting and waiting.
 *
 * Flow visible below: validate lockmethod and lockmode; refuse locks
 * stronger than RowExclusiveLock on relations/objects during recovery;
 * find-or-create a backend-local LOCALLOCK entry (short-circuiting when
 * nLocks > 0, i.e. the lock is already held locally); try the fast path
 * when eligible; otherwise set up shared LOCK/PROCLOCK entries under the
 * partition LWLock, grant immediately if there is no conflict, and else
 * join the wait queue -- sleeping in WaitOnLock() unless dontWait.
 * Returns LOCKACQUIRE_OK on success, LOCKACQUIRE_NOT_AVAIL when dontWait
 * is set and the lock cannot be granted (optionally logging holders and
 * waiters when logLockFailure), or reports a deadlock via
 * DeadLockReport(), which does not return.  If locallockp is non-NULL it
 * receives the LOCALLOCK on success and NULL on failure.
 *
 * NOTE(review): this is a doxygen rendering; hyperlinked source lines
 * (e.g. 868, 894, 913, 944, 946, 973, 988, 999, 1005, 1034, 1071, 1126,
 * 1136, 1139, 1165, 1171, 1188, 1205, 1236, 1252, 1265) are elided below
 * -- consult the real lock.c for the full statements.
 */
843{
844 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
845 LockMethod lockMethodTable;
846 LOCALLOCKTAG localtag;
847 LOCALLOCK *locallock;
848 LOCK *lock;
849 PROCLOCK *proclock;
850 bool found;
851 ResourceOwner owner;
852 uint32 hashcode;
853 LWLock *partitionLock;
854 bool found_conflict;
855 ProcWaitStatus waitResult;
856 bool log_lock = false;
857
858 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
859 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
860 lockMethodTable = LockMethods[lockmethodid];
861 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
862 elog(ERROR, "unrecognized lock mode: %d", lockmode);
863
864 if (RecoveryInProgress() && !InRecovery &&
865 (locktag->locktag_type == LOCKTAG_OBJECT ||
866 locktag->locktag_type == LOCKTAG_RELATION) &&
867 lockmode > RowExclusiveLock)
869 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
870 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
871 lockMethodTable->lockModeNames[lockmode]),
872 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
873
874#ifdef LOCK_DEBUG
875 if (LOCK_DEBUG_ENABLED(locktag))
876 elog(LOG, "LockAcquire: lock [%u,%u] %s",
877 locktag->locktag_field1, locktag->locktag_field2,
878 lockMethodTable->lockModeNames[lockmode]);
879#endif
880
881 /* Identify owner for lock */
882 if (sessionLock)
883 owner = NULL;
884 else
885 owner = CurrentResourceOwner;
886
887 /*
888 * Find or create a LOCALLOCK entry for this lock and lockmode
889 */
890 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
891 localtag.lock = *locktag;
892 localtag.mode = lockmode;
893
895 &localtag,
896 HASH_ENTER, &found);
897
898 /*
899 * if it's a new locallock object, initialize it
900 */
901 if (!found)
902 {
903 locallock->lock = NULL;
904 locallock->proclock = NULL;
905 locallock->hashcode = LockTagHashCode(&(localtag.lock));
906 locallock->nLocks = 0;
907 locallock->holdsStrongLockCount = false;
908 locallock->lockCleared = false;
909 locallock->numLockOwners = 0;
910 locallock->maxLockOwners = 8;
911 locallock->lockOwners = NULL; /* in case next line fails */
912 locallock->lockOwners = (LOCALLOCKOWNER *)
914 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
915 }
916 else
917 {
918 /* Make sure there will be room to remember the lock */
919 if (locallock->numLockOwners >= locallock->maxLockOwners)
920 {
921 int newsize = locallock->maxLockOwners * 2;
922
923 locallock->lockOwners = (LOCALLOCKOWNER *)
924 repalloc(locallock->lockOwners,
925 newsize * sizeof(LOCALLOCKOWNER));
926 locallock->maxLockOwners = newsize;
927 }
928 }
929 hashcode = locallock->hashcode;
930
931 if (locallockp)
932 *locallockp = locallock;
933
934 /*
935 * If we already hold the lock, we can just increase the count locally.
936 *
937 * If lockCleared is already set, caller need not worry about absorbing
938 * sinval messages related to the lock's object.
939 */
940 if (locallock->nLocks > 0)
941 {
942 GrantLockLocal(locallock, owner);
943 if (locallock->lockCleared)
945 else
947 }
948
949 /*
950 * We don't acquire any other heavyweight lock while holding the relation
951 * extension lock. We do allow to acquire the same relation extension
952 * lock more than once but that case won't reach here.
953 */
954 Assert(!IsRelationExtensionLockHeld);
955
956 /*
957 * Prepare to emit a WAL record if acquisition of this lock needs to be
958 * replayed in a standby server.
959 *
960 * Here we prepare to log; after lock is acquired we'll issue log record.
961 * This arrangement simplifies error recovery in case the preparation step
962 * fails.
963 *
964 * Only AccessExclusiveLocks can conflict with lock types that read-only
965 * transactions can acquire in a standby server. Make sure this definition
966 * matches the one in GetRunningTransactionLocks().
967 */
968 if (lockmode >= AccessExclusiveLock &&
969 locktag->locktag_type == LOCKTAG_RELATION &&
972 {
974 log_lock = true;
975 }
976
977 /*
978 * Attempt to take lock via fast path, if eligible. But if we remember
979 * having filled up the fast path array, we don't attempt to make any
980 * further use of it until we release some locks. It's possible that some
981 * other backend has transferred some of those locks to the shared hash
982 * table, leaving space free, but it's not worth acquiring the LWLock just
983 * to check. It's also possible that we're acquiring a second or third
984 * lock type on a relation we have already locked using the fast-path, but
985 * for now we don't worry about that case either.
986 */
987 if (EligibleForRelationFastPath(locktag, lockmode) &&
989 {
990 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
991 bool acquired;
992
993 /*
994 * LWLockAcquire acts as a memory sequencing point, so it's safe to
995 * assume that any strong locker whose increment to
996 * FastPathStrongRelationLocks->counts becomes visible after we test
997 * it has yet to begin to transfer fast-path locks.
998 */
1000 if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
1001 acquired = false;
1002 else
1003 acquired = FastPathGrantRelationLock(locktag->locktag_field2,
1004 lockmode);
1006 if (acquired)
1007 {
1008 /*
1009 * The locallock might contain stale pointers to some old shared
1010 * objects; we MUST reset these to null before considering the
1011 * lock to be acquired via fast-path.
1012 */
1013 locallock->lock = NULL;
1014 locallock->proclock = NULL;
1015 GrantLockLocal(locallock, owner);
1016 return LOCKACQUIRE_OK;
1017 }
1018 }
1019
1020 /*
1021 * If this lock could potentially have been taken via the fast-path by
1022 * some other backend, we must (temporarily) disable further use of the
1023 * fast-path for this lock tag, and migrate any locks already taken via
1024 * this method to the main lock table.
1025 */
1026 if (ConflictsWithRelationFastPath(locktag, lockmode))
1027 {
1028 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1029
1030 BeginStrongLockAcquire(locallock, fasthashcode);
1031 if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1032 hashcode))
1033 {
1035 if (locallock->nLocks == 0)
1036 RemoveLocalLock(locallock);
1037 if (locallockp)
1038 *locallockp = NULL;
1039 if (reportMemoryError)
1040 ereport(ERROR,
1041 (errcode(ERRCODE_OUT_OF_MEMORY),
1042 errmsg("out of shared memory"),
1043 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1044 else
1045 return LOCKACQUIRE_NOT_AVAIL;
1046 }
1047 }
1048
1049 /*
1050 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1051 * take it via the fast-path, either, so we've got to mess with the shared
1052 * lock table.
1053 */
1054 partitionLock = LockHashPartitionLock(hashcode);
1055
1056 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1057
1058 /*
1059 * Find or create lock and proclock entries with this tag
1060 *
1061 * Note: if the locallock object already existed, it might have a pointer
1062 * to the lock already ... but we should not assume that that pointer is
1063 * valid, since a lock object with zero hold and request counts can go
1064 * away anytime. So we have to use SetupLockInTable() to recompute the
1065 * lock and proclock pointers, even if they're already set.
1066 */
1067 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1068 hashcode, lockmode);
1069 if (!proclock)
1070 {
1072 LWLockRelease(partitionLock);
1073 if (locallock->nLocks == 0)
1074 RemoveLocalLock(locallock);
1075 if (locallockp)
1076 *locallockp = NULL;
1077 if (reportMemoryError)
1078 ereport(ERROR,
1079 (errcode(ERRCODE_OUT_OF_MEMORY),
1080 errmsg("out of shared memory"),
1081 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1082 else
1083 return LOCKACQUIRE_NOT_AVAIL;
1084 }
1085 locallock->proclock = proclock;
1086 lock = proclock->tag.myLock;
1087 locallock->lock = lock;
1088
1089 /*
1090 * If lock requested conflicts with locks requested by waiters, must join
1091 * wait queue. Otherwise, check for conflict with already-held locks.
1092 * (That's last because most complex check.)
1093 */
1094 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1095 found_conflict = true;
1096 else
1097 found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1098 lock, proclock);
1099
1100 if (!found_conflict)
1101 {
1102 /* No conflict with held or previously requested locks */
1103 GrantLock(lock, proclock, lockmode);
1104 waitResult = PROC_WAIT_STATUS_OK;
1105 }
1106 else
1107 {
1108 /*
1109 * Join the lock's wait queue. We call this even in the dontWait
1110 * case, because JoinWaitQueue() may discover that we can acquire the
1111 * lock immediately after all.
1112 */
1113 waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
1114 }
1115
1116 if (waitResult == PROC_WAIT_STATUS_ERROR)
1117 {
1118 /*
1119 * We're not getting the lock because a deadlock was detected already
1120 * while trying to join the wait queue, or because we would have to
1121 * wait but the caller requested no blocking.
1122 *
1123 * Undo the changes to shared entries before releasing the partition
1124 * lock.
1125 */
1127
1128 if (proclock->holdMask == 0)
1129 {
1130 uint32 proclock_hashcode;
1131
1132 proclock_hashcode = ProcLockHashCode(&proclock->tag,
1133 hashcode);
1134 dlist_delete(&proclock->lockLink);
1135 dlist_delete(&proclock->procLink);
1137 &(proclock->tag),
1138 proclock_hashcode,
1140 NULL))
1141 elog(PANIC, "proclock table corrupted");
1142 }
1143 else
1144 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1145 lock->nRequested--;
1146 lock->requested[lockmode]--;
1147 LOCK_PRINT("LockAcquire: did not join wait queue",
1148 lock, lockmode);
1149 Assert((lock->nRequested > 0) &&
1150 (lock->requested[lockmode] >= 0));
1151 Assert(lock->nGranted <= lock->nRequested);
1152 LWLockRelease(partitionLock);
1153 if (locallock->nLocks == 0)
1154 RemoveLocalLock(locallock);
1155
1156 if (dontWait)
1157 {
1158 /*
1159 * Log lock holders and waiters as a detail log message if
1160 * logLockFailure = true and lock acquisition fails with dontWait
1161 * = true
1162 */
1163 if (logLockFailure)
1164 {
1166 lock_waiters_sbuf,
1167 lock_holders_sbuf;
1168 const char *modename;
1169 int lockHoldersNum = 0;
1170
1172 initStringInfo(&lock_waiters_sbuf);
1173 initStringInfo(&lock_holders_sbuf);
1174
1175 DescribeLockTag(&buf, &locallock->tag.lock);
1176 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1177 lockmode);
1178
1179 /* Gather a list of all lock holders and waiters */
1180 LWLockAcquire(partitionLock, LW_SHARED);
1181 GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1182 &lock_waiters_sbuf, &lockHoldersNum);
1183 LWLockRelease(partitionLock);
1184
1185 ereport(LOG,
1186 (errmsg("process %d could not obtain %s on %s",
1187 MyProcPid, modename, buf.data),
1189 "Process holding the lock: %s, Wait queue: %s.",
1190 "Processes holding the lock: %s, Wait queue: %s.",
1191 lockHoldersNum,
1192 lock_holders_sbuf.data,
1193 lock_waiters_sbuf.data)));
1194
1195 pfree(buf.data);
1196 pfree(lock_holders_sbuf.data);
1197 pfree(lock_waiters_sbuf.data);
1198 }
1199 if (locallockp)
1200 *locallockp = NULL;
1201 return LOCKACQUIRE_NOT_AVAIL;
1202 }
1203 else
1204 {
1206 /* DeadLockReport() will not return */
1207 }
1208 }
1209
1210 /*
1211 * We are now in the lock queue, or the lock was already granted. If
1212 * queued, go to sleep.
1213 */
1214 if (waitResult == PROC_WAIT_STATUS_WAITING)
1215 {
1216 Assert(!dontWait);
1217 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1218 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1219 LWLockRelease(partitionLock);
1220
1221 waitResult = WaitOnLock(locallock, owner);
1222
1223 /*
1224 * NOTE: do not do any material change of state between here and
1225 * return. All required changes in locktable state must have been
1226 * done when the lock was granted to us --- see notes in WaitOnLock.
1227 */
1228
1229 if (waitResult == PROC_WAIT_STATUS_ERROR)
1230 {
1231 /*
1232 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1233 * now.
1234 */
1235 Assert(!dontWait);
1237 /* DeadLockReport() will not return */
1238 }
1239 }
1240 else
1241 LWLockRelease(partitionLock);
1242 Assert(waitResult == PROC_WAIT_STATUS_OK);
1243
1244 /* The lock was granted to us. Update the local lock entry accordingly */
1245 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1246 GrantLockLocal(locallock, owner);
1247
1248 /*
1249 * Lock state is fully up-to-date now; if we error out after this, no
1250 * special error cleanup is required.
1251 */
1253
1254 /*
1255 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1256 * standby server.
1257 */
1258 if (log_lock)
1259 {
1260 /*
1261 * Decode the locktag back to the original values, to avoid sending
1262 * lots of empty bytes with every message. See lock.h to check how a
1263 * locktag is defined for LOCKTAG_RELATION
1264 */
1266 locktag->locktag_field2);
1267 }
1268
1269 return LOCKACQUIRE_OK;
1270}
void DeadLockReport(void)
Definition: deadlock.c:1075
int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...)
Definition: elog.c:1276
#define LOG
Definition: elog.h:31
int MyProcPid
Definition: globals.c:47
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition: lmgr.c:1249
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1476
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2861
void AbortStrongLockAcquire(void)
Definition: lock.c:1860
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2782
static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1932
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition: lock.c:4255
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:267
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1824
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1529
static void FinishStrongLockAcquire(void)
Definition: lock.c:1850
@ LOCKTAG_OBJECT
Definition: lock.h:147
@ LOCKACQUIRE_ALREADY_CLEAR
Definition: lock.h:506
@ LOCKACQUIRE_OK
Definition: lock.h:504
@ LOCKACQUIRE_ALREADY_HELD
Definition: lock.h:505
@ LOCKACQUIRE_NOT_AVAIL
Definition: lock.h:503
#define RowExclusiveLock
Definition: lockdefs.h:38
void pfree(void *pointer)
Definition: mcxt.c:1594
static char * buf
Definition: pg_test_fsync.c:72
ProcWaitStatus
Definition: proc.h:140
@ PROC_WAIT_STATUS_OK
Definition: proc.h:141
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:142
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:143
ResourceOwner CurrentResourceOwner
Definition: resowner.c:173
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition: proc.c:1140
void GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
Definition: proc.c:1900
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1448
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1431
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
bool lockCleared
Definition: lock.h:442
bool RecoveryInProgress(void)
Definition: xlog.c:6383
#define XLogStandbyInfoActive()
Definition: xlog.h:123
bool InRecovery
Definition: xlogutils.c:50

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert(), BeginStrongLockAcquire(), buf, ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, StringInfoData::data, DeadLockReport(), DescribeLockTag(), dlist_delete(), EligibleForRelationFastPath, elog, ereport, errcode(), errdetail_log_plural(), errhint(), errmsg(), ERROR, FAST_PATH_REL_GROUP, FastPathGrantRelationLock(), FastPathLocalUseCounts, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, GetLockHoldersAndWaiters(), GetLockmodeName(), GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, initStringInfo(), InRecovery, JoinWaitQueue(), lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, MyProcPid, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, pfree(), PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_OK, PROC_WAIT_STATUS_WAITING, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), 
LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, LOCALLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), ConditionalLockTuple(), ConditionalXactLockTableWait(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK lock,
PROCLOCK proclock 
)

Definition at line 1529 of file lock.c.

1533{
1534 int numLockModes = lockMethodTable->numLockModes;
1535 LOCKMASK myLocks;
1536 int conflictMask = lockMethodTable->conflictTab[lockmode];
1537 int conflictsRemaining[MAX_LOCKMODES];
1538 int totalConflictsRemaining = 0;
1539 dlist_iter proclock_iter;
1540 int i;
1541
1542 /*
1543 * first check for global conflicts: If no locks conflict with my request,
1544 * then I get the lock.
1545 *
1546 * Checking for conflict: lock->grantMask represents the types of
1547 * currently held locks. conflictTable[lockmode] has a bit set for each
1548 * type of lock that conflicts with request. Bitwise compare tells if
1549 * there is a conflict.
1550 */
1551 if (!(conflictMask & lock->grantMask))
1552 {
1553 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1554 return false;
1555 }
1556
1557 /*
1558 * Rats. Something conflicts. But it could still be my own lock, or a
1559 * lock held by another member of my locking group. First, figure out how
1560 * many conflicts remain after subtracting out any locks I hold myself.
1561 */
1562 myLocks = proclock->holdMask;
1563 for (i = 1; i <= numLockModes; i++)
1564 {
1565 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1566 {
1567 conflictsRemaining[i] = 0;
1568 continue;
1569 }
1570 conflictsRemaining[i] = lock->granted[i];
1571 if (myLocks & LOCKBIT_ON(i))
1572 --conflictsRemaining[i];
1573 totalConflictsRemaining += conflictsRemaining[i];
1574 }
1575
1576 /* If no conflicts remain, we get the lock. */
1577 if (totalConflictsRemaining == 0)
1578 {
1579 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1580 return false;
1581 }
1582
1583 /* If no group locking, it's definitely a conflict. */
1584 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1585 {
1586 Assert(proclock->tag.myProc == MyProc);
1587 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1588 proclock);
1589 return true;
1590 }
1591
1592 /*
1593 * The relation extension lock conflict even between the group members.
1594 */
1595 if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1596 {
1597 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1598 proclock);
1599 return true;
1600 }
1601
1602 /*
1603 * Locks held in conflicting modes by members of our own lock group are
1604 * not real conflicts; we can subtract those out and see if we still have
1605 * a conflict. This is O(N) in the number of processes holding or
1606 * awaiting locks on this object. We could improve that by making the
1607 * shared memory state more complex (and larger) but it doesn't seem worth
1608 * it.
1609 */
1610 dlist_foreach(proclock_iter, &lock->procLocks)
1611 {
1612 PROCLOCK *otherproclock =
1613 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1614
1615 if (proclock != otherproclock &&
1616 proclock->groupLeader == otherproclock->groupLeader &&
1617 (otherproclock->holdMask & conflictMask) != 0)
1618 {
1619 int intersectMask = otherproclock->holdMask & conflictMask;
1620
1621 for (i = 1; i <= numLockModes; i++)
1622 {
1623 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1624 {
1625 if (conflictsRemaining[i] <= 0)
1626 elog(PANIC, "proclocks held do not match lock");
1627 conflictsRemaining[i]--;
1628 totalConflictsRemaining--;
1629 }
1630 }
1631
1632 if (totalConflictsRemaining == 0)
1633 {
1634 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1635 proclock);
1636 return false;
1637 }
1638 }
1639 }
1640
1641 /* Nope, it's a real conflict. */
1642 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1643 return true;
1644}
#define LOCK_LOCKTAG(lock)
Definition: lock.h:327

References Assert(), LockMethodData::conflictTab, dlist_iter::cur, dlist_container, dlist_foreach, elog, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by JoinWaitQueue(), LockAcquireExtended(), and ProcLockWakeup().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 696 of file lock.c.

697{
698 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
699 LockMethod lockMethodTable;
700 LOCALLOCKTAG localtag;
701 LOCALLOCK *locallock;
702 LOCK *lock;
703 PROCLOCK *proclock;
704 LWLock *partitionLock;
705 bool hasWaiters = false;
706
707 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
708 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
709 lockMethodTable = LockMethods[lockmethodid];
710 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
711 elog(ERROR, "unrecognized lock mode: %d", lockmode);
712
713#ifdef LOCK_DEBUG
714 if (LOCK_DEBUG_ENABLED(locktag))
715 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
716 locktag->locktag_field1, locktag->locktag_field2,
717 lockMethodTable->lockModeNames[lockmode]);
718#endif
719
720 /*
721 * Find the LOCALLOCK entry for this lock and lockmode
722 */
723 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
724 localtag.lock = *locktag;
725 localtag.mode = lockmode;
726
727 locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
728 &localtag,
729 HASH_FIND, NULL);
730
731 /*
732 * let the caller print its own error message, too. Do not ereport(ERROR).
733 */
734 if (!locallock || locallock->nLocks <= 0)
735 {
736 elog(WARNING, "you don't own a lock of type %s",
737 lockMethodTable->lockModeNames[lockmode]);
738 return false;
739 }
740
741 /*
742 * Check the shared lock table.
743 */
744 partitionLock = LockHashPartitionLock(locallock->hashcode);
745
746 LWLockAcquire(partitionLock, LW_SHARED);
747
748 /*
749 * We don't need to re-find the lock or proclock, since we kept their
750 * addresses in the locallock table, and they couldn't have been removed
751 * while we were holding a lock on them.
752 */
753 lock = locallock->lock;
754 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
755 proclock = locallock->proclock;
756 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
757
758 /*
759 * Double-check that we are actually holding a lock of the type we want to
760 * release.
761 */
762 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
763 {
764 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
765 LWLockRelease(partitionLock);
766 elog(WARNING, "you don't own a lock of type %s",
767 lockMethodTable->lockModeNames[lockmode]);
768 RemoveLocalLock(locallock);
769 return false;
770 }
771
772 /*
773 * Do the checking.
774 */
775 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
776 hasWaiters = true;
777
778 LWLockRelease(partitionLock);
779
780 return hasWaiters;
781}
#define WARNING
Definition: elog.h:36

References LockMethodData::conflictTab, elog, ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  orstronger 
)

Definition at line 643 of file lock.c.

645{
646 LOCALLOCKTAG localtag;
647 LOCALLOCK *locallock;
648
649 /*
650 * See if there is a LOCALLOCK entry for this lock and lockmode
651 */
652 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
653 localtag.lock = *locktag;
654 localtag.mode = lockmode;
655
656 locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
657 &localtag,
658 HASH_FIND, NULL);
659
660 if (locallock && locallock->nLocks > 0)
661 return true;
662
663 if (orstronger)
664 {
665 LOCKMODE slockmode;
666
667 for (slockmode = lockmode + 1;
668 slockmode <= MaxLockMode;
669 slockmode++)
670 {
671 if (LockHeldByMe(locktag, slockmode, false))
672 return true;
673 }
674 }
675
676 return false;
677}
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition: lock.c:643
#define MaxLockMode
Definition: lockdefs.h:45

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockHeldByMe(), LockMethodLocalHash, MaxLockMode, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe(), CheckRelationOidLockedByMe(), LockHeldByMe(), and UpdateSubscriptionRelState().

◆ LockManagerShmemInit()

void LockManagerShmemInit ( void  )

Definition at line 444 of file lock.c.

445{
446 HASHCTL info;
447 int64 init_table_size,
448 max_table_size;
449 bool found;
450
451 /*
452 * Compute init/max size to request for lock hashtables. Note these
453 * calculations must agree with LockManagerShmemSize!
454 */
455 max_table_size = NLOCKENTS();
456 init_table_size = max_table_size / 2;
457
458 /*
459 * Allocate hash table for LOCK structs. This stores per-locked-object
460 * information.
461 */
462 info.keysize = sizeof(LOCKTAG);
463 info.entrysize = sizeof(LOCK);
464 info.num_partitions = NUM_LOCK_PARTITIONS;
465
466 LockMethodLockHash = ShmemInitHash("LOCK hash",
467 init_table_size,
468 max_table_size,
469 &info,
470 HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
471
472 /* Assume an average of 2 holders per lock */
473 max_table_size *= 2;
474 init_table_size *= 2;
475
476 /*
477 * Allocate hash table for PROCLOCK structs. This stores
478 * per-lock-per-holder information.
479 */
480 info.keysize = sizeof(PROCLOCKTAG);
481 info.entrysize = sizeof(PROCLOCK);
482 info.hash = proclock_hash;
483 info.num_partitions = NUM_LOCK_PARTITIONS;
484
485 LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
486 init_table_size,
487 max_table_size,
488 &info,
489 HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
490
491 /*
492 * Allocate fast-path structures.
493 */
494 FastPathStrongRelationLocks =
495 ShmemInitStruct("Fast Path Strong Relation Lock Data",
496 sizeof(FastPathStrongRelationLockData), &found);
497 if (!found)
498 SpinLockInit(&FastPathStrongRelationLocks->mutex);
499}
int64_t int64
Definition: c.h:536
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:56
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:574
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct PROCLOCKTAG PROCLOCKTAG
HTAB * ShmemInitHash(const char *name, int64 init_size, int64 max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:332
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:387
#define SpinLockInit(lock)
Definition: spin.h:57
HashValueFunc hash
Definition: hsearch.h:78
int64 num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateOrAttachShmemStructs().

◆ LockManagerShmemSize()

Size LockManagerShmemSize ( void  )

Definition at line 3758 of file lock.c.

3759{
3760 Size size = 0;
3761 long max_table_size;
3762
3763 /* lock hash table */
3764 max_table_size = NLOCKENTS();
3765 size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3766
3767 /* proclock hash table */
3768 max_table_size *= 2;
3769 size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3770
3771 /*
3772 * Since NLOCKENTS is only an estimate, add 10% safety margin.
3773 */
3774 size = add_size(size, size / 10);
3775
3776 return size;
3777}
size_t Size
Definition: c.h:611
Size hash_estimate_size(int64 num_entries, Size entrysize)
Definition: dynahash.c:783
Size add_size(Size s1, Size s2)
Definition: shmem.c:493

References add_size(), hash_estimate_size(), and NLOCKENTS.

Referenced by CalculateShmemSize().

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2706 of file lock.c.

2707{
2708 ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2709
2710 Assert(parent != NULL);
2711
2712 if (locallocks == NULL)
2713 {
2714 HASH_SEQ_STATUS status;
2715 LOCALLOCK *locallock;
2716
2717 hash_seq_init(&status, LockMethodLocalHash);
2718
2719 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2720 LockReassignOwner(locallock, parent);
2721 }
2722 else
2723 {
2724 int i;
2725
2726 for (i = nlocks - 1; i >= 0; i--)
2727 LockReassignOwner(locallocks[i], parent);
2728 }
2729}
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2736
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:902

References Assert(), CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK locallock,
ResourceOwner  parent 
)
static

Definition at line 2736 of file lock.c.

2737{
2738 LOCALLOCKOWNER *lockOwners;
2739 int i;
2740 int ic = -1;
2741 int ip = -1;
2742
2743 /*
2744 * Scan to see if there are any locks belonging to current owner or its
2745 * parent
2746 */
2747 lockOwners = locallock->lockOwners;
2748 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2749 {
2750 if (lockOwners[i].owner == CurrentResourceOwner)
2751 ic = i;
2752 else if (lockOwners[i].owner == parent)
2753 ip = i;
2754 }
2755
2756 if (ic < 0)
2757 return; /* no current locks */
2758
2759 if (ip < 0)
2760 {
2761 /* Parent has no slot, so just give it the child's slot */
2762 lockOwners[ic].owner = parent;
2763 ResourceOwnerRememberLock(parent, locallock);
2764 }
2765 else
2766 {
2767 /* Merge child's count with parent's */
2768 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2769 /* compact out unused slot */
2770 locallock->numLockOwners--;
2771 if (ic < locallock->numLockOwners)
2772 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2773 }
2774 ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2775}
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1079

References CurrentResourceOwner, i, LOCALLOCK::lockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3286 of file lock.c.

3289{
3290 LOCK *lock;
3291 PROCLOCK *proclock;
3292 PROCLOCKTAG proclocktag;
3293 uint32 hashcode;
3294 uint32 proclock_hashcode;
3295 LWLock *partitionLock;
3296 bool wakeupNeeded;
3297
3298 hashcode = LockTagHashCode(locktag);
3299 partitionLock = LockHashPartitionLock(hashcode);
3300
3301 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3302
3303 /*
3304 * Re-find the lock object (it had better be there).
3305 */
3306 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3307 locktag,
3308 hashcode,
3309 HASH_FIND,
3310 NULL);
3311 if (!lock)
3312 elog(PANIC, "failed to re-find shared lock object");
3313
3314 /*
3315 * Re-find the proclock object (ditto).
3316 */
3317 proclocktag.myLock = lock;
3318 proclocktag.myProc = proc;
3319
3320 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3321
3322 proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3323 &proclocktag,
3324 proclock_hashcode,
3325 HASH_FIND,
3326 NULL);
3327 if (!proclock)
3328 elog(PANIC, "failed to re-find shared proclock object");
3329
3330 /*
3331 * Double-check that we are actually holding a lock of the type we want to
3332 * release.
3333 */
3334 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3335 {
3336 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3337 LWLockRelease(partitionLock);
3338 elog(WARNING, "you don't own a lock of type %s",
3339 lockMethodTable->lockModeNames[lockmode]);
3340 return;
3341 }
3342
3343 /*
3344 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3345 */
3346 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3347
3348 CleanUpLock(lock, proclock,
3349 lockMethodTable, hashcode,
3350 wakeupNeeded);
3351
3352 LWLockRelease(partitionLock);
3353
3354 /*
3355 * Decrement strong lock count. This logic is needed only for 2PC.
3356 */
3357 if (decrement_strong_lock_count
3358 && ConflictsWithRelationFastPath(locktag, lockmode))
3359 {
3360 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3361
3362 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3363 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3364 FastPathStrongRelationLocks->count[fasthashcode]--;
3365 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3366 }
3367}
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1681
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1738

References Assert(), CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 2102 of file lock.c.

2103{
2104 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2105 LockMethod lockMethodTable;
2106 LOCALLOCKTAG localtag;
2107 LOCALLOCK *locallock;
2108 LOCK *lock;
2109 PROCLOCK *proclock;
2110 LWLock *partitionLock;
2111 bool wakeupNeeded;
2112
2113 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2114 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2115 lockMethodTable = LockMethods[lockmethodid];
2116 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2117 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2118
2119#ifdef LOCK_DEBUG
2120 if (LOCK_DEBUG_ENABLED(locktag))
2121 elog(LOG, "LockRelease: lock [%u,%u] %s",
2122 locktag->locktag_field1, locktag->locktag_field2,
2123 lockMethodTable->lockModeNames[lockmode]);
2124#endif
2125
2126 /*
2127 * Find the LOCALLOCK entry for this lock and lockmode
2128 */
2129 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2130 localtag.lock = *locktag;
2131 localtag.mode = lockmode;
2132
2133 locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2134 &localtag,
2135 HASH_FIND, NULL);
2136
2137 /*
2138 * let the caller print its own error message, too. Do not ereport(ERROR).
2139 */
2140 if (!locallock || locallock->nLocks <= 0)
2141 {
2142 elog(WARNING, "you don't own a lock of type %s",
2143 lockMethodTable->lockModeNames[lockmode]);
2144 return false;
2145 }
2146
2147 /*
2148 * Decrease the count for the resource owner.
2149 */
2150 {
2151 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2152 ResourceOwner owner;
2153 int i;
2154
2155 /* Identify owner for lock */
2156 if (sessionLock)
2157 owner = NULL;
2158 else
2159 owner = CurrentResourceOwner;
2160
2161 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2162 {
2163 if (lockOwners[i].owner == owner)
2164 {
2165 Assert(lockOwners[i].nLocks > 0);
2166 if (--lockOwners[i].nLocks == 0)
2167 {
2168 if (owner != NULL)
2169 ResourceOwnerForgetLock(owner, locallock);
2170 /* compact out unused slot */
2171 locallock->numLockOwners--;
2172 if (i < locallock->numLockOwners)
2173 lockOwners[i] = lockOwners[locallock->numLockOwners];
2174 }
2175 break;
2176 }
2177 }
2178 if (i < 0)
2179 {
2180 /* don't release a lock belonging to another owner */
2181 elog(WARNING, "you don't own a lock of type %s",
2182 lockMethodTable->lockModeNames[lockmode]);
2183 return false;
2184 }
2185 }
2186
2187 /*
2188 * Decrease the total local count. If we're still holding the lock, we're
2189 * done.
2190 */
2191 locallock->nLocks--;
2192
2193 if (locallock->nLocks > 0)
2194 return true;
2195
2196 /*
2197 * At this point we can no longer suppose we are clear of invalidation
2198 * messages related to this lock. Although we'll delete the LOCALLOCK
2199 * object before any intentional return from this routine, it seems worth
2200 * the trouble to explicitly reset lockCleared right now, just in case
2201 * some error prevents us from deleting the LOCALLOCK.
2202 */
2203 locallock->lockCleared = false;
2204
2205 /* Attempt fast release of any lock eligible for the fast path. */
2206 if (EligibleForRelationFastPath(locktag, lockmode) &&
2207 FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
2208 {
2209 bool released;
2210
2211 /*
2212 * We might not find the lock here, even if we originally entered it
2213 * here. Another backend may have moved it to the main table.
2214 */
2215 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2216 released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2217 lockmode);
2218 LWLockRelease(&MyProc->fpInfoLock);
2219 if (released)
2220 {
2221 RemoveLocalLock(locallock);
2222 return true;
2223 }
2224 }
2225
2226 /*
2227 * Otherwise we've got to mess with the shared lock table.
2228 */
2229 partitionLock = LockHashPartitionLock(locallock->hashcode);
2230
2231 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2232
2233 /*
2234 * Normally, we don't need to re-find the lock or proclock, since we kept
2235 * their addresses in the locallock table, and they couldn't have been
2236 * removed while we were holding a lock on them. But it's possible that
2237 * the lock was taken fast-path and has since been moved to the main hash
2238 * table by another backend, in which case we will need to look up the
2239 * objects here. We assume the lock field is NULL if so.
2240 */
2241 lock = locallock->lock;
2242 if (!lock)
2243 {
2244 PROCLOCKTAG proclocktag;
2245
2246 Assert(EligibleForRelationFastPath(locktag, lockmode));
2247 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2248 locktag,
2249 locallock->hashcode,
2250 HASH_FIND,
2251 NULL);
2252 if (!lock)
2253 elog(ERROR, "failed to re-find shared lock object");
2254 locallock->lock = lock;
2255
2256 proclocktag.myLock = lock;
2257 proclocktag.myProc = MyProc;
2258 locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2259 &proclocktag,
2260 HASH_FIND,
2261 NULL);
2262 if (!locallock->proclock)
2263 elog(ERROR, "failed to re-find shared proclock object");
2264 }
2265 LOCK_PRINT("LockRelease: found", lock, lockmode);
2266 proclock = locallock->proclock;
2267 PROCLOCK_PRINT("LockRelease: found", proclock);
2268
2269 /*
2270 * Double-check that we are actually holding a lock of the type we want to
2271 * release.
2272 */
2273 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2274 {
2275 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2276 LWLockRelease(partitionLock);
2277 elog(WARNING, "you don't own a lock of type %s",
2278 lockMethodTable->lockModeNames[lockmode]);
2279 RemoveLocalLock(locallock);
2280 return false;
2281 }
2282
2283 /*
2284 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2285 */
2286 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2287
2288 CleanUpLock(lock, proclock,
2289 lockMethodTable, locallock->hashcode,
2290 wakeupNeeded);
2291
2292 LWLockRelease(partitionLock);
2293
2294 RemoveLocalLock(locallock);
2295 return true;
2296}
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2825

References Assert(), CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FAST_PATH_REL_GROUP, FastPathLocalUseCounts, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SearchSysCacheLocked1(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2307 of file lock.c.

2308{
2309 HASH_SEQ_STATUS status;
2310 LockMethod lockMethodTable;
2311 int i,
2312 numLockModes;
2313 LOCALLOCK *locallock;
2314 LOCK *lock;
2315 int partition;
2316 bool have_fast_path_lwlock = false;
2317
2318 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2319 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2320 lockMethodTable = LockMethods[lockmethodid];
2321
2322#ifdef LOCK_DEBUG
2323 if (*(lockMethodTable->trace_flag))
2324 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2325#endif
2326
2327 /*
2328 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2329 * the only way that the lock we hold on our own VXID can ever get
2330 * released: it is always and only released when a toplevel transaction
2331 * ends.
2332 */
2333 if (lockmethodid == DEFAULT_LOCKMETHOD)
2334 VirtualXactLockTableCleanup();
2335
2336 numLockModes = lockMethodTable->numLockModes;
2337
2338 /*
2339 * First we run through the locallock table and get rid of unwanted
2340 * entries, then we scan the process's proclocks and get rid of those. We
2341 * do this separately because we may have multiple locallock entries
2342 * pointing to the same proclock, and we daren't end up with any dangling
2343 * pointers. Fast-path locks are cleaned up during the locallock table
2344 * scan, though.
2345 */
2346 hash_seq_init(&status, LockMethodLocalHash);
2347
2348 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2349 {
2350 /*
2351 * If the LOCALLOCK entry is unused, something must've gone wrong
2352 * while trying to acquire this lock. Just forget the local entry.
2353 */
2354 if (locallock->nLocks == 0)
2355 {
2356 RemoveLocalLock(locallock);
2357 continue;
2358 }
2359
2360 /* Ignore items that are not of the lockmethod to be removed */
2361 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2362 continue;
2363
2364 /*
2365 * If we are asked to release all locks, we can just zap the entry.
2366 * Otherwise, must scan to see if there are session locks. We assume
2367 * there is at most one lockOwners entry for session locks.
2368 */
2369 if (!allLocks)
2370 {
2371 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2372
2373 /* If session lock is above array position 0, move it down to 0 */
2374 for (i = 0; i < locallock->numLockOwners; i++)
2375 {
2376 if (lockOwners[i].owner == NULL)
2377 lockOwners[0] = lockOwners[i];
2378 else
2379 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2380 }
2381
2382 if (locallock->numLockOwners > 0 &&
2383 lockOwners[0].owner == NULL &&
2384 lockOwners[0].nLocks > 0)
2385 {
2386 /* Fix the locallock to show just the session locks */
2387 locallock->nLocks = lockOwners[0].nLocks;
2388 locallock->numLockOwners = 1;
2389 /* We aren't deleting this locallock, so done */
2390 continue;
2391 }
2392 else
2393 locallock->numLockOwners = 0;
2394 }
2395
2396#ifdef USE_ASSERT_CHECKING
2397
2398 /*
2399 * Tuple locks are currently held only for short durations within a
2400 * transaction. Check that we didn't forget to release one.
2401 */
2402 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
2403 elog(WARNING, "tuple lock held at commit");
2404#endif
2405
2406 /*
2407 * If the lock or proclock pointers are NULL, this lock was taken via
2408 * the relation fast-path (and is not known to have been transferred).
2409 */
2410 if (locallock->proclock == NULL || locallock->lock == NULL)
2411 {
2412 LOCKMODE lockmode = locallock->tag.mode;
2413 Oid relid;
2414
2415 /* Verify that a fast-path lock is what we've got. */
2416 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2417 elog(PANIC, "locallock table corrupted");
2418
2419 /*
2420 * If we don't currently hold the LWLock that protects our
2421 * fast-path data structures, we must acquire it before attempting
2422 * to release the lock via the fast-path. We will continue to
2423 * hold the LWLock until we're done scanning the locallock table,
2424 * unless we hit a transferred fast-path lock. (XXX is this
2425 * really such a good idea? There could be a lot of entries ...)
2426 */
2427 if (!have_fast_path_lwlock)
2428 {
2429 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2430 have_fast_path_lwlock = true;
2431 }
2432
2433 /* Attempt fast-path release. */
2434 relid = locallock->tag.lock.locktag_field2;
2435 if (FastPathUnGrantRelationLock(relid, lockmode))
2436 {
2437 RemoveLocalLock(locallock);
2438 continue;
2439 }
2440
2441 /*
2442 * Our lock, originally taken via the fast path, has been
2443 * transferred to the main lock table. That's going to require
2444 * some extra work, so release our fast-path lock before starting.
2445 */
2447 have_fast_path_lwlock = false;
2448
2449 /*
2450 * Now dump the lock. We haven't got a pointer to the LOCK or
2451 * PROCLOCK in this case, so we have to handle this a bit
2452 * differently than a normal lock release. Unfortunately, this
2453 * requires an extra LWLock acquire-and-release cycle on the
2454 * partitionLock, but hopefully it shouldn't happen often.
2455 */
2456 LockRefindAndRelease(lockMethodTable, MyProc,
2457 &locallock->tag.lock, lockmode, false);
2458 RemoveLocalLock(locallock);
2459 continue;
2460 }
2461
2462 /* Mark the proclock to show we need to release this lockmode */
2463 if (locallock->nLocks > 0)
2464 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2465
2466 /* And remove the locallock hashtable entry */
2467 RemoveLocalLock(locallock);
2468 }
2469
2470 /* Done with the fast-path data structures */
2471 if (have_fast_path_lwlock)
2473
2474 /*
2475 * Now, scan each lock partition separately.
2476 */
2477 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2478 {
2479 LWLock *partitionLock;
2480 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2481 dlist_mutable_iter proclock_iter;
2482
2483 partitionLock = LockHashPartitionLockByIndex(partition);
2484
2485 /*
2486 * If the proclock list for this partition is empty, we can skip
2487 * acquiring the partition lock. This optimization is trickier than
2488 * it looks, because another backend could be in process of adding
2489 * something to our proclock list due to promoting one of our
2490 * fast-path locks. However, any such lock must be one that we
2491 * decided not to delete above, so it's okay to skip it again now;
2492 * we'd just decide not to delete it again. We must, however, be
2493 * careful to re-fetch the list header once we've acquired the
2494 * partition lock, to be sure we have a valid, up-to-date pointer.
2495 * (There is probably no significant risk if pointer fetch/store is
2496 * atomic, but we don't wish to assume that.)
2497 *
2498 * XXX This argument assumes that the locallock table correctly
2499 * represents all of our fast-path locks. While allLocks mode
2500 * guarantees to clean up all of our normal locks regardless of the
2501 * locallock situation, we lose that guarantee for fast-path locks.
2502 * This is not ideal.
2503 */
2504 if (dlist_is_empty(procLocks))
2505 continue; /* needn't examine this partition */
2506
2507 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2508
2509 dlist_foreach_modify(proclock_iter, procLocks)
2510 {
2511 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2512 bool wakeupNeeded = false;
2513
2514 Assert(proclock->tag.myProc == MyProc);
2515
2516 lock = proclock->tag.myLock;
2517
2518 /* Ignore items that are not of the lockmethod to be removed */
2519 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2520 continue;
2521
2522 /*
2523 * In allLocks mode, force release of all locks even if locallock
2524 * table had problems
2525 */
2526 if (allLocks)
2527 proclock->releaseMask = proclock->holdMask;
2528 else
2529 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2530
2531 /*
2532 * Ignore items that have nothing to be released, unless they have
2533 * holdMask == 0 and are therefore recyclable
2534 */
2535 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2536 continue;
2537
2538 PROCLOCK_PRINT("LockReleaseAll", proclock);
2539 LOCK_PRINT("LockReleaseAll", lock, 0);
2540 Assert(lock->nRequested >= 0);
2541 Assert(lock->nGranted >= 0);
2542 Assert(lock->nGranted <= lock->nRequested);
2543 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2544
2545 /*
2546 * Release the previously-marked lock modes
2547 */
2548 for (i = 1; i <= numLockModes; i++)
2549 {
2550 if (proclock->releaseMask & LOCKBIT_ON(i))
2551 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2552 lockMethodTable);
2553 }
2554 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2555 Assert(lock->nGranted <= lock->nRequested);
2556 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2557
2558 proclock->releaseMask = 0;
2559
2560 /* CleanUpLock will wake up waiters if needed. */
2561 CleanUpLock(lock, proclock,
2562 lockMethodTable,
2563 LockTagHashCode(&lock->tag),
2564 wakeupNeeded);
2565 } /* loop over PROCLOCKs within this partition */
2566
2567 LWLockRelease(partitionLock);
2568 } /* loop over partitions */
2569
2570#ifdef LOCK_DEBUG
2571 if (*(lockMethodTable->trace_flag))
2572 elog(LOG, "LockReleaseAll done");
2573#endif
2574}
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4645
@ LOCKTAG_TUPLE
Definition: lock.h:143
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:445
const bool * trace_flag
Definition: lock.h:115
dlist_node * cur
Definition: ilist.h:200

References Assert(), CleanUpLock(), dlist_mutable_iter::cur, DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCK_LOCKTAG, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LOCKTAG_TUPLE, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, PANIC, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), VirtualXactLockTableCleanup(), and WARNING.

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2611 of file lock.c.

2612{
2613 if (locallocks == NULL)
2614 {
2615 HASH_SEQ_STATUS status;
2616 LOCALLOCK *locallock;
2617
2619
2620 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2621 ReleaseLockIfHeld(locallock, false);
2622 }
2623 else
2624 {
2625 int i;
2626
2627 for (i = nlocks - 1; i >= 0; i--)
2628 ReleaseLockIfHeld(locallocks[i], false);
2629 }
2630}
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2646

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2581 of file lock.c.

2582{
2583 HASH_SEQ_STATUS status;
2584 LOCALLOCK *locallock;
2585
2586 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2587 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2588
2590
2591 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2592 {
2593 /* Ignore items that are not of the specified lock method */
2594 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2595 continue;
2596
2597 ReleaseLockIfHeld(locallock, true);
2598 }
2599}

References elog, ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG locktag)

Definition at line 557 of file lock.c.

558{
	/*
	 * Delegate to dynahash's get_hash_value() on the shared LOCK hash
	 * table, so callers obtain exactly the hash code that table would
	 * compute for this tag (needed e.g. to pick the right partition lock).
	 */
 559 return get_hash_value(LockMethodLockHash, locktag);
 560}
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:908

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4856 of file lock.c.

4857{
4858 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4859 LOCK *lock;
4860 bool found;
4861 uint32 hashcode;
4862 LWLock *partitionLock;
4863 int waiters = 0;
4864
4865 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4866 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4867
4868 hashcode = LockTagHashCode(locktag);
4869 partitionLock = LockHashPartitionLock(hashcode);
4870 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4871
4873 locktag,
4874 hashcode,
4875 HASH_FIND,
4876 &found);
4877 if (found)
4878 {
4879 Assert(lock != NULL);
4880 waiters = lock->nRequested;
4881 }
4882 LWLockRelease(partitionLock);
4883
4884 return waiters;
4885}

References Assert(), elog, ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

◆ PostPrepare_Locks()

void PostPrepare_Locks ( FullTransactionId  fxid)

Definition at line 3574 of file lock.c.

3575{
3576 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3577 HASH_SEQ_STATUS status;
3578 LOCALLOCK *locallock;
3579 LOCK *lock;
3580 PROCLOCK *proclock;
3581 PROCLOCKTAG proclocktag;
3582 int partition;
3583
3584 /* Can't prepare a lock group follower. */
3585 Assert(MyProc->lockGroupLeader == NULL ||
3587
3588 /* This is a critical section: any error means big trouble */
3590
3591 /*
3592 * First we run through the locallock table and get rid of unwanted
3593 * entries, then we scan the process's proclocks and transfer them to the
3594 * target proc.
3595 *
3596 * We do this separately because we may have multiple locallock entries
3597 * pointing to the same proclock, and we daren't end up with any dangling
3598 * pointers.
3599 */
3601
3602 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3603 {
3604 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3605 bool haveSessionLock;
3606 bool haveXactLock;
3607 int i;
3608
3609 if (locallock->proclock == NULL || locallock->lock == NULL)
3610 {
3611 /*
3612 * We must've run out of shared memory while trying to set up this
3613 * lock. Just forget the local entry.
3614 */
3615 Assert(locallock->nLocks == 0);
3616 RemoveLocalLock(locallock);
3617 continue;
3618 }
3619
3620 /* Ignore VXID locks */
3622 continue;
3623
3624 /* Scan to see whether we hold it at session or transaction level */
3625 haveSessionLock = haveXactLock = false;
3626 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3627 {
3628 if (lockOwners[i].owner == NULL)
3629 haveSessionLock = true;
3630 else
3631 haveXactLock = true;
3632 }
3633
3634 /* Ignore it if we have only session lock */
3635 if (!haveXactLock)
3636 continue;
3637
3638 /* This can't happen, because we already checked it */
3639 if (haveSessionLock)
3640 ereport(PANIC,
3641 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3642 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3643
3644 /* Mark the proclock to show we need to release this lockmode */
3645 if (locallock->nLocks > 0)
3646 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3647
3648 /* And remove the locallock hashtable entry */
3649 RemoveLocalLock(locallock);
3650 }
3651
3652 /*
3653 * Now, scan each lock partition separately.
3654 */
3655 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3656 {
3657 LWLock *partitionLock;
3658 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3659 dlist_mutable_iter proclock_iter;
3660
3661 partitionLock = LockHashPartitionLockByIndex(partition);
3662
3663 /*
3664 * If the proclock list for this partition is empty, we can skip
3665 * acquiring the partition lock. This optimization is safer than the
3666 * situation in LockReleaseAll, because we got rid of any fast-path
3667 * locks during AtPrepare_Locks, so there cannot be any case where
3668 * another backend is adding something to our lists now. For safety,
3669 * though, we code this the same way as in LockReleaseAll.
3670 */
3671 if (dlist_is_empty(procLocks))
3672 continue; /* needn't examine this partition */
3673
3674 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3675
3676 dlist_foreach_modify(proclock_iter, procLocks)
3677 {
3678 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3679
3680 Assert(proclock->tag.myProc == MyProc);
3681
3682 lock = proclock->tag.myLock;
3683
3684 /* Ignore VXID locks */
3686 continue;
3687
3688 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3689 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3690 Assert(lock->nRequested >= 0);
3691 Assert(lock->nGranted >= 0);
3692 Assert(lock->nGranted <= lock->nRequested);
3693 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3694
3695 /* Ignore it if nothing to release (must be a session lock) */
3696 if (proclock->releaseMask == 0)
3697 continue;
3698
3699 /* Else we should be releasing all locks */
3700 if (proclock->releaseMask != proclock->holdMask)
3701 elog(PANIC, "we seem to have dropped a bit somewhere");
3702
3703 /*
3704 * We cannot simply modify proclock->tag.myProc to reassign
3705 * ownership of the lock, because that's part of the hash key and
3706 * the proclock would then be in the wrong hash chain. Instead
3707 * use hash_update_hash_key. (We used to create a new hash entry,
3708 * but that risks out-of-memory failure if other processes are
3709 * busy making proclocks too.) We must unlink the proclock from
3710 * our procLink chain and put it into the new proc's chain, too.
3711 *
3712 * Note: the updated proclock hash key will still belong to the
3713 * same hash partition, cf proclock_hash(). So the partition lock
3714 * we already hold is sufficient for this.
3715 */
3716 dlist_delete(&proclock->procLink);
3717
3718 /*
3719 * Create the new hash key for the proclock.
3720 */
3721 proclocktag.myLock = lock;
3722 proclocktag.myProc = newproc;
3723
3724 /*
3725 * Update groupLeader pointer to point to the new proc. (We'd
3726 * better not be a member of somebody else's lock group!)
3727 */
3728 Assert(proclock->groupLeader == proclock->tag.myProc);
3729 proclock->groupLeader = newproc;
3730
3731 /*
3732 * Update the proclock. We should not find any existing entry for
3733 * the same hash key, since there can be only one entry for any
3734 * given lock with my own proc.
3735 */
3737 proclock,
3738 &proclocktag))
3739 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3740
3741 /* Re-link into the new proc's proclock list */
3742 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3743
3744 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3745 } /* loop over PROCLOCKs within this partition */
3746
3747 LWLockRelease(partitionLock);
3748 } /* loop over partitions */
3749
3751}
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1140
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151

References Assert(), dlist_mutable_iter::cur, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void *  key,
Size  keysize 
)
static

Definition at line 574 of file lock.c.

575{
	/*
	 * Custom hash function for the shared PROCLOCK hash table.  The hash
	 * combines the associated LOCK tag's hash with the owning PGPROC's
	 * address; the PGPROC contribution is left-shifted so the low-order
	 * partition-number bits stay equal to the LOCK's hash, keeping each
	 * PROCLOCK in the same hash partition as its LOCK.
	 */
 576 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
 577 uint32 lockhash;
 578 Datum procptr;
 579
	/* dynahash passes the key size; it must be a PROCLOCKTAG */
 580 Assert(keysize == sizeof(PROCLOCKTAG));
 581
 582 /* Look into the associated LOCK object, and compute its hash code */
 583 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
 584
 585 /*
 586 * To make the hash code also depend on the PGPROC, we xor the proc
 587 * struct's address into the hash code, left-shifted so that the
 588 * partition-number bits don't change. Since this is only a hash, we
 589 * don't care if we lose high-order bits of the address; use an
 590 * intermediate variable to suppress cast-pointer-to-int warnings.
 591 */
 592 procptr = PointerGetDatum(proclocktag->myProc);
 593 lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
 594
 595 return lockhash;
 596}
#define LOG2_NUM_LOCK_PARTITIONS
Definition: lwlock.h:94
static uint32 DatumGetUInt32(Datum X)
Definition: postgres.h:232
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:332
uint64_t Datum
Definition: postgres.h:70

References Assert(), DatumGetUInt32(), sort-test::key, LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PointerGetDatum(), and LOCK::tag.

Referenced by LockManagerShmemInit().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 605 of file lock.c.

606{
	/*
	 * Shortcut variant of proclock_hash(): the caller already has the
	 * LOCK tag's hash code in hand, so we skip recomputing it and just
	 * fold in the PGPROC address in exactly the same way.  Any change
	 * here must be mirrored in proclock_hash(), and vice versa.
	 */
 607 uint32 lockhash = hashcode;
 608 Datum procptr;
 609
 610 /*
 611 * This must match proclock_hash()!
 612 */
 613 procptr = PointerGetDatum(proclocktag->myProc);
 614 lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
 615
 616 return lockhash;
 617}

References DatumGetUInt32(), LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myProc, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2646 of file lock.c.

2647{
	/*
	 * Release whatever count the target owner holds on this locallock.
	 * sessionLock = true means the "owner" is the session (NULL owner
	 * slot); otherwise it is CurrentResourceOwner.  If that owner holds
	 * only part of the total count, we just subtract its share and drop
	 * its owner slot; if it accounts for all remaining holds, we release
	 * the lock entirely via LockRelease().
	 */
 2648 ResourceOwner owner;
 2649 LOCALLOCKOWNER *lockOwners;
 2650 int i;
 2651
 2652 /* Identify owner for lock (must match LockRelease!) */
 2653 if (sessionLock)
 2654 owner = NULL;
 2655 else
 2656 owner = CurrentResourceOwner;
 2657
 2658 /* Scan to see if there are any locks belonging to the target owner */
 2659 lockOwners = locallock->lockOwners;
 2660 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 2661 {
 2662 if (lockOwners[i].owner == owner)
 2663 {
 2664 Assert(lockOwners[i].nLocks > 0);
 2665 if (lockOwners[i].nLocks < locallock->nLocks)
 2666 {
 2667 /*
 2668 * We will still hold this lock after forgetting this
 2669 * ResourceOwner.
 2670 */
 2671 locallock->nLocks -= lockOwners[i].nLocks;
 2672 /* compact out unused slot */
 2673 locallock->numLockOwners--;
	/* session pseudo-owner (NULL) is not tracked by any ResourceOwner */
 2674 if (owner != NULL)
 2675 ResourceOwnerForgetLock(owner, locallock);
	/* move the last slot into the hole left by the removed entry */
 2676 if (i < locallock->numLockOwners)
 2677 lockOwners[i] = lockOwners[locallock->numLockOwners];
 2678 }
 2679 else
 2680 {
 2681 Assert(lockOwners[i].nLocks == locallock->nLocks);
 2682 /* We want to call LockRelease just once */
 2683 lockOwners[i].nLocks = 1;
 2684 locallock->nLocks = 1;
 2685 if (!LockRelease(&locallock->tag.lock,
 2686 locallock->tag.mode,
 2687 sessionLock))
 2688 elog(WARNING, "ReleaseLockIfHeld: failed??");
 2689 }
	/* each locallock has at most one slot per owner, so we're done */
 2690 break;
 2691 }
 2692 }
 2693}
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:2102

References Assert(), CurrentResourceOwner, elog, i, LOCALLOCKTAG::lock, LOCALLOCK::lockOwners, LockRelease(), LOCALLOCKTAG::mode, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, ResourceOwnerForgetLock(), LOCALLOCK::tag, and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 2046 of file lock.c.

2047{
2048 LOCK *waitLock = proc->waitLock;
2049 PROCLOCK *proclock = proc->waitProcLock;
2050 LOCKMODE lockmode = proc->waitLockMode;
2051 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
2052
2053 /* Make sure proc is waiting */
2055 Assert(proc->links.next != NULL);
2056 Assert(waitLock);
2057 Assert(!dclist_is_empty(&waitLock->waitProcs));
2058 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
2059
2060 /* Remove proc from lock's wait queue */
2061 dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
2062
2063 /* Undo increments of request counts by waiting process */
2064 Assert(waitLock->nRequested > 0);
2065 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2066 waitLock->nRequested--;
2067 Assert(waitLock->requested[lockmode] > 0);
2068 waitLock->requested[lockmode]--;
2069 /* don't forget to clear waitMask bit if appropriate */
2070 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2071 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2072
2073 /* Clean up the proc's own state, and pass it the ok/fail signal */
2074 proc->waitLock = NULL;
2075 proc->waitProcLock = NULL;
2077
2078 /*
2079 * Delete the proclock immediately if it represents no already-held locks.
2080 * (This must happen now because if the owner of the lock decides to
2081 * release it, and the requested/granted counts then go to zero,
2082 * LockRelease expects there to be no remaining proclocks.) Then see if
2083 * any other waiters for the lock can be woken up now.
2084 */
2085 CleanUpLock(waitLock, proclock,
2086 LockMethods[lockmethodid], hashcode,
2087 true);
2088}
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition: ilist.h:776
PROCLOCK * waitProcLock
Definition: proc.h:250
ProcWaitStatus waitStatus
Definition: proc.h:184

References Assert(), CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), and LockErrorCleanup().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1476 of file lock.c.

1477{
1478 int i;
1479
1480 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1481 {
1482 if (locallock->lockOwners[i].owner != NULL)
1483 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1484 }
1485 locallock->numLockOwners = 0;
1486 if (locallock->lockOwners != NULL)
1487 pfree(locallock->lockOwners);
1488 locallock->lockOwners = NULL;
1489
1490 if (locallock->holdsStrongLockCount)
1491 {
1492 uint32 fasthashcode;
1493
1494 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1495
1497 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1498 FastPathStrongRelationLocks->count[fasthashcode]--;
1499 locallock->holdsStrongLockCount = false;
1501 }
1502
1504 &(locallock->tag),
1505 HASH_REMOVE, NULL))
1506 elog(WARNING, "locallock table corrupted");
1507
1508 /*
1509 * Indicate that the lock is released for certain types of locks
1510 */
1511 CheckAndSetLockHeld(locallock, false);
1512}

References Assert(), CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_REMOVE, hash_search(), LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, i, LockMethodLocalHash, LOCALLOCK::lockOwners, FastPathStrongRelationLockData::mutex, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire, SpinLockRelease, LOCALLOCK::tag, and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ ResetAwaitedLock()

void ResetAwaitedLock ( void  )

Definition at line 1907 of file lock.c.

1908{
	/*
	 * Forget the record of the lock we were sleeping on.  Per the
	 * cross-references, this is called from LockErrorCleanup() after a
	 * failed or cancelled lock wait.
	 */
 1909 awaitedLock = NULL;
 1910}

References awaitedLock.

Referenced by LockErrorCleanup().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1283 of file lock.c.

1285{
1286 LOCK *lock;
1287 PROCLOCK *proclock;
1288 PROCLOCKTAG proclocktag;
1289 uint32 proclock_hashcode;
1290 bool found;
1291
1292 /*
1293 * Find or create a lock with this tag.
1294 */
1296 locktag,
1297 hashcode,
1299 &found);
1300 if (!lock)
1301 return NULL;
1302
1303 /*
1304 * if it's a new lock object, initialize it
1305 */
1306 if (!found)
1307 {
1308 lock->grantMask = 0;
1309 lock->waitMask = 0;
1310 dlist_init(&lock->procLocks);
1311 dclist_init(&lock->waitProcs);
1312 lock->nRequested = 0;
1313 lock->nGranted = 0;
1314 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1315 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1316 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1317 }
1318 else
1319 {
1320 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1321 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1322 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1323 Assert(lock->nGranted <= lock->nRequested);
1324 }
1325
1326 /*
1327 * Create the hash key for the proclock table.
1328 */
1329 proclocktag.myLock = lock;
1330 proclocktag.myProc = proc;
1331
1332 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1333
1334 /*
1335 * Find or create a proclock entry with this tag
1336 */
1338 &proclocktag,
1339 proclock_hashcode,
1341 &found);
1342 if (!proclock)
1343 {
1344 /* Oops, not enough shmem for the proclock */
1345 if (lock->nRequested == 0)
1346 {
1347 /*
1348 * There are no other requestors of this lock, so garbage-collect
1349 * the lock object. We *must* do this to avoid a permanent leak
1350 * of shared memory, because there won't be anything to cause
1351 * anyone to release the lock object later.
1352 */
1353 Assert(dlist_is_empty(&(lock->procLocks)));
1355 &(lock->tag),
1356 hashcode,
1358 NULL))
1359 elog(PANIC, "lock table corrupted");
1360 }
1361 return NULL;
1362 }
1363
1364 /*
1365 * If new, initialize the new entry
1366 */
1367 if (!found)
1368 {
1369 uint32 partition = LockHashPartition(hashcode);
1370
1371 /*
1372 * It might seem unsafe to access proclock->groupLeader without a
1373 * lock, but it's not really. Either we are initializing a proclock
1374 * on our own behalf, in which case our group leader isn't changing
1375 * because the group leader for a process can only ever be changed by
1376 * the process itself; or else we are transferring a fast-path lock to
1377 * the main lock table, in which case that process can't change its
1378 * lock group leader without first releasing all of its locks (and in
1379 * particular the one we are currently transferring).
1380 */
1381 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1382 proc->lockGroupLeader : proc;
1383 proclock->holdMask = 0;
1384 proclock->releaseMask = 0;
1385 /* Add proclock to appropriate lists */
1386 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1387 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1388 PROCLOCK_PRINT("LockAcquire: new", proclock);
1389 }
1390 else
1391 {
1392 PROCLOCK_PRINT("LockAcquire: found", proclock);
1393 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1394
1395#ifdef CHECK_DEADLOCK_RISK
1396
1397 /*
1398 * Issue warning if we already hold a lower-level lock on this object
1399 * and do not hold a lock of the requested level or higher. This
1400 * indicates a deadlock-prone coding practice (eg, we'd have a
1401 * deadlock if another backend were following the same code path at
1402 * about the same time).
1403 *
1404 * This is not enabled by default, because it may generate log entries
1405 * about user-level coding practices that are in fact safe in context.
1406 * It can be enabled to help find system-level problems.
1407 *
1408 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1409 * better to use a table. For now, though, this works.
1410 */
1411 {
1412 int i;
1413
1414 for (i = lockMethodTable->numLockModes; i > 0; i--)
1415 {
1416 if (proclock->holdMask & LOCKBIT_ON(i))
1417 {
1418 if (i >= (int) lockmode)
1419 break; /* safe: we have a lock >= req level */
1420 elog(LOG, "deadlock risk: raising lock level"
1421 " from %s to %s on object %u/%u/%u",
1422 lockMethodTable->lockModeNames[i],
1423 lockMethodTable->lockModeNames[lockmode],
1424 lock->tag.locktag_field1, lock->tag.locktag_field2,
1425 lock->tag.locktag_field3);
1426 break;
1427 }
1428 }
1429 }
1430#endif /* CHECK_DEADLOCK_RISK */
1431 }
1432
1433 /*
1434 * lock->nRequested and lock->requested[] count the total number of
1435 * requests, whether granted or waiting, so increment those immediately.
1436 * The other counts don't increment till we get the lock.
1437 */
1438 lock->nRequested++;
1439 lock->requested[lockmode]++;
1440 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1441
1442 /*
1443 * We shouldn't already hold the desired lock; else locallock table is
1444 * broken.
1445 */
1446 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1447 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1448 lockMethodTable->lockModeNames[lockmode],
1449 lock->tag.locktag_field1, lock->tag.locktag_field2,
1450 lock->tag.locktag_field3);
1451
1452 return proclock;
1453}

References Assert(), dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ERROR, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, LockMethodData::numLockModes, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1681 of file lock.c.

1683{
	/*
	 * Subtract one granted hold of the given lockmode from the LOCK's and
	 * PROCLOCK's bookkeeping (nRequested/requested[], nGranted/granted[],
	 * grantMask, holdMask).  Returns true if the caller should run a
	 * wakeup pass, i.e. the released mode conflicts with at least one
	 * mode some waiter is blocked on.  Caller must hold the partition
	 * lock; this function itself takes no locks.
	 */
 1684 bool wakeupNeeded = false;
 1685
 1686 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
 1687 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
 1688 Assert(lock->nGranted <= lock->nRequested);
 1689
 1690 /*
 1691 * fix the general lock stats
 1692 */
 1693 lock->nRequested--;
 1694 lock->requested[lockmode]--;
 1695 lock->nGranted--;
 1696 lock->granted[lockmode]--;
 1697
 1698 if (lock->granted[lockmode] == 0)
 1699 {
 1700 /* change the conflict mask. No more of this lock type. */
 1701 lock->grantMask &= LOCKBIT_OFF(lockmode);
 1702 }
 1703
 1704 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
 1705
 1706 /*
 1707 * We need only run ProcLockWakeup if the released lock conflicts with at
 1708 * least one of the lock types requested by waiter(s). Otherwise whatever
 1709 * conflict made them wait must still exist. NOTE: before MVCC, we could
 1710 * skip wakeup if lock->granted[lockmode] was still positive. But that's
 1711 * not true anymore, because the remaining granted locks might belong to
 1712 * some waiter, who could now be awakened because he doesn't conflict with
 1713 * his own locks.
 1714 */
 1715 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
 1716 wakeupNeeded = true;
 1717
 1718 /*
 1719 * Now fix the per-proclock state.
 1720 */
 1721 proclock->holdMask &= LOCKBIT_OFF(lockmode);
 1722 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
 1723
 1724 return wakeupNeeded;
 1725}

References Assert(), LockMethodData::conflictTab, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4745 of file lock.c.

4746{
4747 LOCKTAG tag;
4748 PGPROC *proc;
4749 TransactionId xid = InvalidTransactionId;
4750
4751 Assert(VirtualTransactionIdIsValid(vxid));
4752
4753 if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4754 /* no vxid lock; localTransactionId is a normal, locked XID */
4755 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4756
4757 SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4758
4759 /*
4760 * If a lock table entry must be made, this is the PGPROC on whose behalf
4761 * it must be done. Note that the transaction might end or the PGPROC
4762 * might be reassigned to a new backend before we get around to examining
4763 * it, but it doesn't matter. If we find upon examination that the
4764 * relevant lxid is no longer running here, that's enough to prove that
4765 * it's no longer running anywhere.
4766 */
4767 proc = ProcNumberGetProc(vxid.procNumber);
4768 if (proc == NULL)
4769 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4770
4771 /*
4772 * We must acquire this lock before checking the procNumber and lxid
4773 * against the ones we're waiting for. The target backend will only set
4774 * or clear lxid while holding this lock.
4775 */
4776 LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4777
4778 if (proc->vxid.procNumber != vxid.procNumber
4779 || proc->vxid.lxid != vxid.localTransactionId)
4780 {
4781 /* VXID ended */
4782 LWLockRelease(&proc->fpInfoLock);
4783 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4784 }
4785
4786 /*
4787 * If we aren't asked to wait, there's no need to set up a lock table
4788 * entry. The transaction is still in progress, so just return false.
4789 */
4790 if (!wait)
4791 {
4792 LWLockRelease(&proc->fpInfoLock);
4793 return false;
4794 }
4795
4796 /*
4797 * OK, we're going to need to sleep on the VXID. But first, we must set
4798 * up the primary lock table entry, if needed (ie, convert the proc's
4799 * fast-path lock on its VXID to a regular lock).
4800 */
4801 if (proc->fpVXIDLock)
4802 {
4803 PROCLOCK *proclock;
4804 uint32 hashcode;
4805 LWLock *partitionLock;
4806
4807 hashcode = LockTagHashCode(&tag);
4808
4809 partitionLock = LockHashPartitionLock(hashcode);
4810 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4811
4812 proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4813 &tag, hashcode, ExclusiveLock);
4814 if (!proclock)
4815 {
4816 LWLockRelease(partitionLock);
4817 LWLockRelease(&proc->fpInfoLock);
4818 ereport(ERROR,
4819 (errcode(ERRCODE_OUT_OF_MEMORY),
4820 errmsg("out of shared memory"),
4821 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4822 }
4823 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4824
4825 LWLockRelease(partitionLock);
4826
4827 proc->fpVXIDLock = false;
4828 }
4829
4830 /*
4831 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4832 * search. The proc might have assigned this XID but not yet locked it,
4833 * in which case the proc will lock this XID before releasing the VXID.
4834 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4835 * so we won't save an XID of a different VXID. It doesn't matter whether
4836 * we save this before or after setting up the primary lock table entry.
4837 */
4838 xid = proc->xid;
4839
4840 /* Done with proc->fpLockBits */
4841 LWLockRelease(&proc->fpInfoLock);
4842
4843 /* Time to wait. */
4844 (void) LockAcquire(&tag, ShareLock, false, false);
4845
4846 LockRelease(&tag, ShareLock, false);
4847 return XactLockForVirtualXact(vxid, xid, wait);
4848}
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4694
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:809
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:71
#define ShareLock
Definition: lockdefs.h:40
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition: procarray.c:3100
#define InvalidTransactionId
Definition: transam.h:31

References Assert(), DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, VirtualTransactionId::procNumber, PGPROC::procNumber, ProcNumberGetProc(), SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, PGPROC::vxid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4645 of file lock.c.

4646{
4647 bool fastpath;
4648 LocalTransactionId lxid;
4649
4650 Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4651
4652 /*
4653 * Clean up shared memory state.
4654 */
4655 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4656
4657 fastpath = MyProc->fpVXIDLock;
4658 lxid = MyProc->fpLocalTransactionId;
4659 MyProc->fpVXIDLock = false;
4660 MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4661
4662 LWLockRelease(&MyProc->fpInfoLock);
4663
4664 /*
4665 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4666 * that means someone transferred the lock to the main lock table.
4667 */
4668 if (!fastpath && LocalTransactionIdIsValid(lxid))
4669 {
4670 VirtualTransactionId vxid;
4671 LOCKTAG locktag;
4672
4673 vxid.procNumber = MyProcNumber;
4674 vxid.localTransactionId = lxid;
4675 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4676
4677 LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4678 &locktag, ExclusiveLock, false);
4679 }
4680}
uint32 LocalTransactionId
Definition: c.h:660
ProcNumber MyProcNumber
Definition: globals.c:90
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:68

References Assert(), DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, INVALID_PROC_NUMBER, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, MyProcNumber, VirtualTransactionId::procNumber, PGPROC::procNumber, SET_LOCKTAG_VIRTUALTRANSACTION, and PGPROC::vxid.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static ProcWaitStatus WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1932 of file lock.c.

1933{
1934 ProcWaitStatus result;
1935 ErrorContextCallback waiterrcontext;
1936
1937 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1938 locallock->tag.lock.locktag_field2,
1939 locallock->tag.lock.locktag_field3,
1940 locallock->tag.lock.locktag_field4,
1941 locallock->tag.lock.locktag_type,
1942 locallock->tag.mode);
1943
1944 /* Setup error traceback support for ereport() */
1945 waiterrcontext.callback = waitonlock_error_callback;
1946 waiterrcontext.arg = (void *) locallock;
1947 waiterrcontext.previous = error_context_stack;
1948 error_context_stack = &waiterrcontext;
1949
1950 /* adjust the process title to indicate that it's waiting */
1951 set_ps_display_suffix("waiting");
1952
1953 /*
1954 * Record the fact that we are waiting for a lock, so that
1955 * LockErrorCleanup will clean up if cancel/die happens.
1956 */
1957 awaitedLock = locallock;
1958 awaitedOwner = owner;
1959
1960 /*
1961 * NOTE: Think not to put any shared-state cleanup after the call to
1962 * ProcSleep, in either the normal or failure path. The lock state must
1963 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1964 * waiting for the lock. This is necessary because of the possibility
1965 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1966 * grants us the lock, but before we've noticed it. Hence, after granting,
1967 * the locktable state must fully reflect the fact that we own the lock;
1968 * we can't do additional work on return.
1969 *
1970 * We can and do use a PG_TRY block to try to clean up after failure, but
1971 * this still has a major limitation: elog(FATAL) can occur while waiting
1972 * (eg, a "die" interrupt), and then control won't come back here. So all
1973 * cleanup of essential state should happen in LockErrorCleanup, not here.
1974 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1975 * is unimportant if the process exits.
1976 */
1977 PG_TRY();
1978 {
1979 result = ProcSleep(locallock);
1980 }
1981 PG_CATCH();
1982 {
1983 /* In this path, awaitedLock remains set until LockErrorCleanup */
1984
1985 /* reset ps display to remove the suffix */
1986 set_ps_display_remove_suffix();
1987
1988 /* and propagate the error */
1989 PG_RE_THROW();
1990 }
1991 PG_END_TRY();
1992
1993 /*
1994 * We no longer want LockErrorCleanup to do anything.
1995 */
1996 awaitedLock = NULL;
1997
1998 /* reset ps display to remove the suffix */
1999 set_ps_display_remove_suffix();
2000
2001 error_context_stack = waiterrcontext.previous;
2002
2003 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2004 locallock->tag.lock.locktag_field2,
2005 locallock->tag.lock.locktag_field3,
2006 locallock->tag.lock.locktag_field4,
2007 locallock->tag.lock.locktag_type,
2008 locallock->tag.mode);
2009
2010 return result;
2011}
ErrorContextCallback * error_context_stack
Definition: elog.c:95
#define PG_RE_THROW()
Definition: elog.h:405
#define PG_TRY(...)
Definition: elog.h:372
#define PG_END_TRY(...)
Definition: elog.h:397
#define PG_CATCH(...)
Definition: elog.h:382
static void waitonlock_error_callback(void *arg)
Definition: lock.c:2020
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:439
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:387
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition: proc.c:1309
struct ErrorContextCallback * previous
Definition: elog.h:297
void(* callback)(void *arg)
Definition: elog.h:298
uint16 locktag_field4
Definition: lock.h:171

References ErrorContextCallback::arg, awaitedLock, awaitedOwner, ErrorContextCallback::callback, error_context_stack, LOCALLOCKTAG::lock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_type, LOCALLOCKTAG::mode, PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, ErrorContextCallback::previous, ProcSleep(), set_ps_display_remove_suffix(), set_ps_display_suffix(), LOCALLOCK::tag, and waitonlock_error_callback().

Referenced by LockAcquireExtended().

◆ waitonlock_error_callback()

static void waitonlock_error_callback ( void *  arg)
static

Definition at line 2020 of file lock.c.

2021{
2022 LOCALLOCK *locallock = (LOCALLOCK *) arg;
2023 const LOCKTAG *tag = &locallock->tag.lock;
2024 LOCKMODE mode = locallock->tag.mode;
2025 StringInfoData locktagbuf;
2026
2027 initStringInfo(&locktagbuf);
2028 DescribeLockTag(&locktagbuf, tag);
2029
2030 errcontext("waiting for %s on %s",
2031 GetLockmodeName(tag->locktag_lockmethodid, mode),
2032 locktagbuf.data);
2033}
#define errcontext
Definition: elog.h:198
void * arg

References arg, StringInfoData::data, DescribeLockTag(), errcontext, GetLockmodeName(), initStringInfo(), LOCALLOCKTAG::lock, LOCKTAG::locktag_lockmethodid, mode, LOCALLOCKTAG::mode, and LOCALLOCK::tag.

Referenced by WaitOnLock().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4694 of file lock.c.

4696{
4697 bool more = false;
4698
4699 /* There is no point to wait for 2PCs if you have no 2PCs. */
4700 if (max_prepared_xacts == 0)
4701 return true;
4702
4703 do
4704 {
4705 LockAcquireResult lar;
4706 LOCKTAG tag;
4707
4708 /* Clear state from previous iterations. */
4709 if (more)
4710 {
4711 xid = InvalidTransactionId;
4712 more = false;
4713 }
4714
4715 /* If we have no xid, try to find one. */
4716 if (!TransactionIdIsValid(xid))
4717 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4718 if (!TransactionIdIsValid(xid))
4719 {
4720 Assert(!more);
4721 return true;
4722 }
4723
4724 /* Check or wait for XID completion. */
4725 SET_LOCKTAG_TRANSACTION(tag, xid);
4726 lar = LockAcquire(&tag, ShareLock, false, !wait);
4727 if (lar == LOCKACQUIRE_NOT_AVAIL)
4728 return false;
4729 LockRelease(&tag, ShareLock, false);
4730 } while (more);
4731
4732 return true;
4733}
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition: lock.h:228
LockAcquireResult
Definition: lock.h:502
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition: twophase.c:857

References Assert(), InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 328 of file lock.c.

Referenced by GetAwaitedLock(), GrantAwaitedLock(), ResetAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 329 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
MaxLockMode,
LockConflicts,
lock_mode_names,
&Dummy_trace
}
static bool Dummy_trace
Definition: lock.c:122
static const char *const lock_mode_names[]
Definition: lock.c:108
static const LOCKMASK LockConflicts[]
Definition: lock.c:65

Definition at line 125 of file lock.c.

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 122 of file lock.c.

◆ FastPathLocalUseCounts

int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
static

◆ FastPathLockGroupsPerBackend

int FastPathLockGroupsPerBackend = 0

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 108 of file lock.c.

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 65 of file lock.c.

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ log_lock_failures

bool log_lock_failures = false

Definition at line 54 of file lock.c.

Referenced by heap_acquire_tuplock(), heap_lock_tuple(), and heapam_tuple_lock().

◆ max_locks_per_xact

int max_locks_per_xact

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 191 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 327 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 136 of file lock.c.