
PostgreSQL Source Code git master
dynahash.c File Reference
#include "postgres.h"
#include <limits.h>
#include "access/xact.h"
#include "common/hashfn.h"
#include "lib/ilist.h"
#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/memutils.h"
Include dependency graph for dynahash.c:

Go to the source code of this file.

Data Structures

struct  FreeListData
 
struct  HASHHDR
 
struct  HTAB
 

Macros

#define DEF_SEGSIZE   256
 
#define DEF_SEGSIZE_SHIFT   8 /* must be log2(DEF_SEGSIZE) */
 
#define DEF_DIRSIZE   256
 
#define NUM_FREELISTS   32
 
#define IS_PARTITIONED(hctl)   ((hctl)->num_partitions != 0)
 
#define FREELIST_IDX(hctl, hashcode)    (IS_PARTITIONED(hctl) ? (hashcode) % NUM_FREELISTS : 0)
 
#define ELEMENTKEY(helem)   (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))
 
#define ELEMENT_FROM_KEY(key)    ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))
 
#define MOD(x, y)   ((x) & ((y)-1))
 
#define MAX_SEQ_SCANS   100
 

Typedefs

typedef HASHELEMENT * HASHBUCKET
 
typedef HASHBUCKET * HASHSEGMENT
 

Functions

static void * DynaHashAlloc (Size size)
 
static HASHSEGMENT seg_alloc (HTAB *hashp)
 
static bool element_alloc (HTAB *hashp, int nelem, int freelist_idx)
 
static bool dir_realloc (HTAB *hashp)
 
static bool expand_table (HTAB *hashp)
 
static HASHBUCKET get_hash_entry (HTAB *hashp, int freelist_idx)
 
static void hdefault (HTAB *hashp)
 
static int choose_nelem_alloc (Size entrysize)
 
static bool init_htab (HTAB *hashp, int64 nelem)
 
static pg_noreturn void hash_corrupted (HTAB *hashp)
 
static uint32 hash_initial_lookup (HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
 
static int my_log2 (int64 num)
 
static int64 next_pow2_int64 (int64 num)
 
static int next_pow2_int (int64 num)
 
static void register_seq_scan (HTAB *hashp)
 
static void deregister_seq_scan (HTAB *hashp)
 
static bool has_seq_scans (HTAB *hashp)
 
static int string_compare (const char *key1, const char *key2, Size keysize)
 
HTAB * hash_create (const char *tabname, int64 nelem, const HASHCTL *info, int flags)
 
Size hash_estimate_size (int64 num_entries, Size entrysize)
 
int64 hash_select_dirsize (int64 num_entries)
 
Size hash_get_shared_size (HASHCTL *info, int flags)
 
void hash_destroy (HTAB *hashp)
 
void hash_stats (const char *caller, HTAB *hashp)
 
uint32 get_hash_value (HTAB *hashp, const void *keyPtr)
 
static uint32 calc_bucket (HASHHDR *hctl, uint32 hash_val)
 
void * hash_search (HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
 
void * hash_search_with_hash_value (HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
 
bool hash_update_hash_key (HTAB *hashp, void *existingEntry, const void *newKeyPtr)
 
int64 hash_get_num_entries (HTAB *hashp)
 
void hash_seq_init (HASH_SEQ_STATUS *status, HTAB *hashp)
 
void hash_seq_init_with_hash_value (HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
 
void * hash_seq_search (HASH_SEQ_STATUS *status)
 
void hash_seq_term (HASH_SEQ_STATUS *status)
 
void hash_freeze (HTAB *hashp)
 
void AtEOXact_HashTables (bool isCommit)
 
void AtEOSubXact_HashTables (bool isCommit, int nestDepth)
 

Variables

static MemoryContext CurrentDynaHashCxt = NULL
 
static HTAB * seq_scan_tables [MAX_SEQ_SCANS]
 
static int seq_scan_level [MAX_SEQ_SCANS]
 
static int num_seq_scans = 0
 

Macro Definition Documentation

◆ DEF_DIRSIZE

#define DEF_DIRSIZE   256

Definition at line 125 of file dynahash.c.

◆ DEF_SEGSIZE

#define DEF_SEGSIZE   256

Definition at line 123 of file dynahash.c.

◆ DEF_SEGSIZE_SHIFT

#define DEF_SEGSIZE_SHIFT   8 /* must be log2(DEF_SEGSIZE) */

Definition at line 124 of file dynahash.c.

◆ ELEMENT_FROM_KEY

#define ELEMENT_FROM_KEY (   key)     ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))

Definition at line 260 of file dynahash.c.

◆ ELEMENTKEY

#define ELEMENTKEY (   helem)    (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))

Definition at line 255 of file dynahash.c.

◆ FREELIST_IDX

#define FREELIST_IDX (   hctl,
  hashcode 
)     (IS_PARTITIONED(hctl) ? (hashcode) % NUM_FREELISTS : 0)

Definition at line 214 of file dynahash.c.

◆ IS_PARTITIONED

#define IS_PARTITIONED (   hctl)    ((hctl)->num_partitions != 0)

Definition at line 212 of file dynahash.c.

◆ MAX_SEQ_SCANS

#define MAX_SEQ_SCANS   100

Definition at line 1875 of file dynahash.c.

◆ MOD

#define MOD (   x,
  y 
)    ((x) & ((y)-1))

Definition at line 266 of file dynahash.c.

◆ NUM_FREELISTS

#define NUM_FREELISTS   32

Definition at line 128 of file dynahash.c.

Typedef Documentation

◆ HASHBUCKET

Definition at line 131 of file dynahash.c.

◆ HASHSEGMENT

Definition at line 134 of file dynahash.c.

Function Documentation

◆ AtEOSubXact_HashTables()

void AtEOSubXact_HashTables ( bool  isCommit,
int  nestDepth 
)

Definition at line 1957 of file dynahash.c.

1958{
1959 int i;
1960
1961 /*
1962 * Search backward to make cleanup easy. Note we must check all entries,
1963 * not only those at the end of the array, because deletion technique
1964 * doesn't keep them in order.
1965 */
1966 for (i = num_seq_scans - 1; i >= 0; i--)
1967 {
1968 if (seq_scan_level[i] >= nestDepth)
1969 {
1970 if (isCommit)
1971 elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1972 seq_scan_tables[i]);
1973 seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
1974 seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
1975 num_seq_scans--;
1976 }
1977 }
1978}
static HTAB * seq_scan_tables[MAX_SEQ_SCANS]
Definition: dynahash.c:1877
static int seq_scan_level[MAX_SEQ_SCANS]
Definition: dynahash.c:1878
static int num_seq_scans
Definition: dynahash.c:1879
#define WARNING
Definition: elog.h:36
#define elog(elevel,...)
Definition: elog.h:226
int i
Definition: isn.c:77

References elog, i, num_seq_scans, seq_scan_level, seq_scan_tables, and WARNING.

Referenced by AbortSubTransaction(), and CommitSubTransaction().

◆ AtEOXact_HashTables()

void AtEOXact_HashTables ( bool  isCommit)

Definition at line 1931 of file dynahash.c.

1932{
1933 /*
1934 * During abort cleanup, open scans are expected; just silently clean 'em
1935 * out. An open scan at commit means someone forgot a hash_seq_term()
1936 * call, so complain.
1937 *
1938 * Note: it's tempting to try to print the tabname here, but refrain for
1939 * fear of touching deallocated memory. This isn't a user-facing message
1940 * anyway, so it needn't be pretty.
1941 */
1942 if (isCommit)
1943 {
1944 int i;
1945
1946 for (i = 0; i < num_seq_scans; i++)
1947 {
1948 elog(WARNING, "leaked hash_seq_search scan for hash table %p",
1949 seq_scan_tables[i]);
1950 }
1951 }
1952 num_seq_scans = 0;
1953}

References elog, i, num_seq_scans, seq_scan_tables, and WARNING.

Referenced by AbortTransaction(), AutoVacLauncherMain(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), pgarch_archiveXlog(), PrepareTransaction(), WalSummarizerMain(), and WalWriterMain().

◆ calc_bucket()

static uint32 calc_bucket ( HASHHDR hctl,
uint32  hash_val 
)
inlinestatic

Definition at line 915 of file dynahash.c.

916{
917 uint32 bucket;
918
919 bucket = hash_val & hctl->high_mask;
920 if (bucket > hctl->max_bucket)
921 bucket = bucket & hctl->low_mask;
922
923 return bucket;
924}
uint32_t uint32
Definition: c.h:539
uint32 high_mask
Definition: dynahash.c:187
uint32 max_bucket
Definition: dynahash.c:186
uint32 low_mask
Definition: dynahash.c:188

References HASHHDR::high_mask, HASHHDR::low_mask, and HASHHDR::max_bucket.

Referenced by expand_table(), and hash_initial_lookup().
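
A minimal standalone sketch of the linear-hashing reduction that calc_bucket() performs: the hash is first masked with high_mask, and if that names a bucket beyond max_bucket (one that no split has created yet), it is re-masked with low_mask so the key lands in the bucket that will later be split into it. The mask values below are purely illustrative and not taken from any real table.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t    max_bucket = 9;     /* highest bucket that exists so far */
    uint32_t    low_mask = 7;       /* (previous power of 2) - 1 */
    uint32_t    high_mask = 15;     /* (next power of 2) - 1 */
    uint32_t    hashvals[] = {3, 12, 13, 28};

    for (int i = 0; i < 4; i++)
    {
        uint32_t    bucket = hashvals[i] & high_mask;

        /* buckets 10..15 do not exist yet, so fall back to the smaller mask */
        if (bucket > max_bucket)
            bucket = bucket & low_mask;
        printf("hash %u -> bucket %u\n", hashvals[i], bucket);
    }
    return 0;
}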

◆ choose_nelem_alloc()

static int choose_nelem_alloc ( Size  entrysize)
static

Definition at line 667 of file dynahash.c.

668{
669 int nelem_alloc;
670 Size elementSize;
671 Size allocSize;
672
673 /* Each element has a HASHELEMENT header plus user data. */
674 /* NB: this had better match element_alloc() */
675 elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
676
677 /*
678 * The idea here is to choose nelem_alloc at least 32, but round up so
679 * that the allocation request will be a power of 2 or just less. This
680 * makes little difference for hash tables in shared memory, but for hash
681 * tables managed by palloc, the allocation request will be rounded up to
682 * a power of 2 anyway. If we fail to take this into account, we'll waste
683 * as much as half the allocated space.
684 */
685 allocSize = 32 * 4; /* assume elementSize at least 8 */
686 do
687 {
688 allocSize <<= 1;
689 nelem_alloc = allocSize / elementSize;
690 } while (nelem_alloc < 32);
691
692 return nelem_alloc;
693}
#define MAXALIGN(LEN)
Definition: c.h:811
size_t Size
Definition: c.h:611

References MAXALIGN.

Referenced by hash_estimate_size(), and init_htab().
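
A worked example of the rounding, assuming a typical 64-bit build where MAXALIGN rounds up to 8 bytes and MAXALIGN(sizeof(HASHELEMENT)) is 16: with entrysize = 56, elementSize = 16 + 56 = 72. allocSize starts at 128 and doubles through 256, 512, 1024 and 2048 to 4096, at which point nelem_alloc = 4096 / 72 = 56, the first quotient of at least 32. Each later element_alloc() call therefore requests 56 * 72 = 4032 bytes, just under the 4096-byte power of 2, so a power-of-2 palloc rounds it up with little waste.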

◆ deregister_seq_scan()

static void deregister_seq_scan ( HTAB hashp)
static

Definition at line 1896 of file dynahash.c.

1897{
1898 int i;
1899
1900 /* Search backward since it's most likely at the stack top */
1901 for (i = num_seq_scans - 1; i >= 0; i--)
1902 {
1903 if (seq_scan_tables[i] == hashp)
1904 {
1905 seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
1906 seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
1907 num_seq_scans--;
1908 return;
1909 }
1910 }
1911 elog(ERROR, "no hash_seq_search scan for hash table \"%s\"",
1912 hashp->tabname);
1913}
#define ERROR
Definition: elog.h:39
char * tabname
Definition: dynahash.c:230

References elog, ERROR, i, num_seq_scans, seq_scan_level, seq_scan_tables, and HTAB::tabname.

Referenced by hash_seq_term().

◆ dir_realloc()

static bool dir_realloc ( HTAB hashp)
static

Definition at line 1643 of file dynahash.c.

1644{
1645 HASHSEGMENT *p;
1646 HASHSEGMENT *old_p;
1647 int64 new_dsize;
1648 int64 old_dirsize;
1649 int64 new_dirsize;
1650
1651 if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
1652 return false;
1653
1654 /* Reallocate directory */
1655 new_dsize = hashp->hctl->dsize << 1;
1656 old_dirsize = hashp->hctl->dsize * sizeof(HASHSEGMENT);
1657 new_dirsize = new_dsize * sizeof(HASHSEGMENT);
1658
1659 old_p = hashp->dir;
1660 CurrentDynaHashCxt = hashp->hcxt;
1661 p = (HASHSEGMENT *) hashp->alloc((Size) new_dirsize);
1662
1663 if (p != NULL)
1664 {
1665 memcpy(p, old_p, old_dirsize);
1666 MemSet(((char *) p) + old_dirsize, 0, new_dirsize - old_dirsize);
1667 hashp->dir = p;
1668 hashp->hctl->dsize = new_dsize;
1669
1670 /* XXX assume the allocator is palloc, so we know how to free */
1671 Assert(hashp->alloc == DynaHashAlloc);
1672 pfree(old_p);
1673
1674 return true;
1675 }
1676
1677 return false;
1678}
int64_t int64
Definition: c.h:536
#define MemSet(start, val, len)
Definition: c.h:1020
static void * DynaHashAlloc(Size size)
Definition: dynahash.c:297
static MemoryContext CurrentDynaHashCxt
Definition: dynahash.c:294
HASHBUCKET * HASHSEGMENT
Definition: dynahash.c:134
Assert(PointerIsAligned(start, uint64))
#define NO_MAX_DSIZE
Definition: hsearch.h:108
void pfree(void *pointer)
Definition: mcxt.c:1594
int64 dsize
Definition: dynahash.c:184
int64 max_dsize
Definition: dynahash.c:194
HASHHDR * hctl
Definition: dynahash.c:223
MemoryContext hcxt
Definition: dynahash.c:229
HashAllocFunc alloc
Definition: dynahash.c:228
HASHSEGMENT * dir
Definition: dynahash.c:224

References HTAB::alloc, Assert(), CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, DynaHashAlloc(), HTAB::hctl, HTAB::hcxt, HASHHDR::max_dsize, MemSet, NO_MAX_DSIZE, and pfree().

Referenced by expand_table().

◆ DynaHashAlloc()

static void * DynaHashAlloc ( Size  size)
static

Definition at line 297 of file dynahash.c.

298{
299 Assert(MemoryContextIsValid(CurrentDynaHashCxt));
300 return MemoryContextAllocExtended(CurrentDynaHashCxt, size,
301 MCXT_ALLOC_NO_OOM);
302}
#define MCXT_ALLOC_NO_OOM
Definition: fe_memutils.h:29
void * MemoryContextAllocExtended(MemoryContext context, Size size, int flags)
Definition: mcxt.c:1286
#define MemoryContextIsValid(context)
Definition: memnodes.h:145

References Assert(), CurrentDynaHashCxt, MCXT_ALLOC_NO_OOM, MemoryContextAllocExtended(), and MemoryContextIsValid.

Referenced by dir_realloc(), hash_create(), and hash_destroy().

◆ element_alloc()

static bool element_alloc ( HTAB hashp,
int  nelem,
int  freelist_idx 
)
static

Definition at line 1701 of file dynahash.c.

1702{
1703 HASHHDR *hctl = hashp->hctl;
1704 Size elementSize;
1705 Size requestSize;
1706 char *allocedBlock;
1707 HASHELEMENT *firstElement;
1708 HASHELEMENT *tmpElement;
1709 HASHELEMENT *prevElement;
1710 int i;
1711
1712 if (hctl->isfixed)
1713 return false;
1714
1715 /* Each element has a HASHELEMENT header plus user data. */
1716 elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
1717
1718 requestSize = nelem * elementSize;
1719
1720 /* Add space for slist_node list link if we need one. */
1721#ifdef USE_VALGRIND
1722 if (!hashp->isshared)
1723 requestSize += MAXALIGN(sizeof(slist_node));
1724#endif
1725
1726 /* Allocate the memory. */
1727 CurrentDynaHashCxt = hashp->hcxt;
1728 allocedBlock = hashp->alloc(requestSize);
1729
1730 if (!allocedBlock)
1731 return false;
1732
1733 /*
1734 * If USE_VALGRIND, each allocated block of elements of a non-shared
1735 * hashtable is chained into a list, so that Valgrind won't think it's
1736 * been leaked.
1737 */
1738#ifdef USE_VALGRIND
1739 if (hashp->isshared)
1740 firstElement = (HASHELEMENT *) allocedBlock;
1741 else
1742 {
1743 slist_push_head(&hashp->element_blocks, (slist_node *) allocedBlock);
1744 firstElement = (HASHELEMENT *) (allocedBlock + MAXALIGN(sizeof(slist_node)));
1745 }
1746#else
1747 firstElement = (HASHELEMENT *) allocedBlock;
1748#endif
1749
1750 /* prepare to link all the new entries into the freelist */
1751 prevElement = NULL;
1752 tmpElement = firstElement;
1753 for (i = 0; i < nelem; i++)
1754 {
1755 tmpElement->link = prevElement;
1756 prevElement = tmpElement;
1757 tmpElement = (HASHELEMENT *) (((char *) tmpElement) + elementSize);
1758 }
1759
1760 /* if partitioned, must lock to touch freeList */
1761 if (IS_PARTITIONED(hctl))
1762 SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1763
1764 /* freelist could be nonempty if two backends did this concurrently */
1765 firstElement->link = hctl->freeList[freelist_idx].freeList;
1766 hctl->freeList[freelist_idx].freeList = prevElement;
1767
1768 if (IS_PARTITIONED(hctl))
1769 SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1770
1771 return true;
1772}
#define IS_PARTITIONED(hctl)
Definition: dynahash.c:212
static void slist_push_head(slist_head *head, slist_node *node)
Definition: ilist.h:1006
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
slock_t mutex
Definition: dynahash.c:155
HASHELEMENT * freeList
Definition: dynahash.c:157
struct HASHELEMENT * link
Definition: hsearch.h:53
FreeListData freeList[NUM_FREELISTS]
Definition: dynahash.c:180
Size entrysize
Definition: dynahash.c:192
bool isfixed
Definition: dynahash.c:198
bool isshared
Definition: dynahash.c:231

References HTAB::alloc, CurrentDynaHashCxt, HASHHDR::entrysize, FreeListData::freeList, HASHHDR::freeList, HTAB::hctl, HTAB::hcxt, i, IS_PARTITIONED, HASHHDR::isfixed, HTAB::isshared, HASHELEMENT::link, MAXALIGN, FreeListData::mutex, slist_push_head(), SpinLockAcquire, and SpinLockRelease.

Referenced by get_hash_entry(), and hash_create().

◆ expand_table()

static bool expand_table ( HTAB hashp)
static

Definition at line 1546 of file dynahash.c.

1547{
1548 HASHHDR *hctl = hashp->hctl;
1549 HASHSEGMENT old_seg,
1550 new_seg;
1551 int64 old_bucket,
1552 new_bucket;
1553 int64 new_segnum,
1554 new_segndx;
1555 int64 old_segnum,
1556 old_segndx;
1557 HASHBUCKET *oldlink,
1558 *newlink;
1559 HASHBUCKET currElement,
1560 nextElement;
1561
1562 Assert(!IS_PARTITIONED(hctl));
1563
1564#ifdef HASH_STATISTICS
1565 hctl->expansions++;
1566#endif
1567
1568 new_bucket = hctl->max_bucket + 1;
1569 new_segnum = new_bucket >> hashp->sshift;
1570 new_segndx = MOD(new_bucket, hashp->ssize);
1571
1572 if (new_segnum >= hctl->nsegs)
1573 {
1574 /* Allocate new segment if necessary -- could fail if dir full */
1575 if (new_segnum >= hctl->dsize)
1576 if (!dir_realloc(hashp))
1577 return false;
1578 if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
1579 return false;
1580 hctl->nsegs++;
1581 }
1582
1583 /* OK, we created a new bucket */
1584 hctl->max_bucket++;
1585
1586 /*
1587 * *Before* changing masks, find old bucket corresponding to same hash
1588 * values; values in that bucket may need to be relocated to new bucket.
1589 * Note that new_bucket is certainly larger than low_mask at this point,
1590 * so we can skip the first step of the regular hash mask calc.
1591 */
1592 old_bucket = (new_bucket & hctl->low_mask);
1593
1594 /*
1595 * If we crossed a power of 2, readjust masks.
1596 */
1597 if ((uint32) new_bucket > hctl->high_mask)
1598 {
1599 hctl->low_mask = hctl->high_mask;
1600 hctl->high_mask = (uint32) new_bucket | hctl->low_mask;
1601 }
1602
1603 /*
1604 * Relocate records to the new bucket. NOTE: because of the way the hash
1605 * masking is done in calc_bucket, only one old bucket can need to be
1606 * split at this point. With a different way of reducing the hash value,
1607 * that might not be true!
1608 */
1609 old_segnum = old_bucket >> hashp->sshift;
1610 old_segndx = MOD(old_bucket, hashp->ssize);
1611
1612 old_seg = hashp->dir[old_segnum];
1613 new_seg = hashp->dir[new_segnum];
1614
1615 oldlink = &old_seg[old_segndx];
1616 newlink = &new_seg[new_segndx];
1617
1618 for (currElement = *oldlink;
1619 currElement != NULL;
1620 currElement = nextElement)
1621 {
1622 nextElement = currElement->link;
1623 if ((int64) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
1624 {
1625 *oldlink = currElement;
1626 oldlink = &currElement->link;
1627 }
1628 else
1629 {
1630 *newlink = currElement;
1631 newlink = &currElement->link;
1632 }
1633 }
1634 /* don't forget to terminate the rebuilt hash chains... */
1635 *oldlink = NULL;
1636 *newlink = NULL;
1637
1638 return true;
1639}
static HASHSEGMENT seg_alloc(HTAB *hashp)
Definition: dynahash.c:1682
#define MOD(x, y)
Definition: dynahash.c:266
static bool dir_realloc(HTAB *hashp)
Definition: dynahash.c:1643
static uint32 calc_bucket(HASHHDR *hctl, uint32 hash_val)
Definition: dynahash.c:915
uint32 hashvalue
Definition: hsearch.h:54
int64 nsegs
Definition: dynahash.c:185
int64 ssize
Definition: dynahash.c:238
int sshift
Definition: dynahash.c:239

References Assert(), calc_bucket(), HTAB::dir, dir_realloc(), HASHHDR::dsize, HASHELEMENT::hashvalue, HTAB::hctl, HASHHDR::high_mask, IS_PARTITIONED, HASHELEMENT::link, HASHHDR::low_mask, HASHHDR::max_bucket, MOD, HASHHDR::nsegs, seg_alloc(), HTAB::sshift, and HTAB::ssize.

Referenced by hash_search_with_hash_value().

◆ get_hash_entry()

static HASHBUCKET get_hash_entry ( HTAB hashp,
int  freelist_idx 
)
static

Definition at line 1251 of file dynahash.c.

1252{
1253 HASHHDR *hctl = hashp->hctl;
1254 HASHBUCKET newElement;
1255
1256 for (;;)
1257 {
1258 /* if partitioned, must lock to touch nentries and freeList */
1259 if (IS_PARTITIONED(hctl))
1260 SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1261
1262 /* try to get an entry from the freelist */
1263 newElement = hctl->freeList[freelist_idx].freeList;
1264
1265 if (newElement != NULL)
1266 break;
1267
1268 if (IS_PARTITIONED(hctl))
1269 SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1270
1271 /*
1272 * No free elements in this freelist. In a partitioned table, there
1273 * might be entries in other freelists, but to reduce contention we
1274 * prefer to first try to get another chunk of buckets from the main
1275 * shmem allocator. If that fails, though, we *MUST* root through all
1276 * the other freelists before giving up. There are multiple callers
1277 * that assume that they can allocate every element in the initially
1278 * requested table size, or that deleting an element guarantees they
1279 * can insert a new element, even if shared memory is entirely full.
1280 * Failing because the needed element is in a different freelist is
1281 * not acceptable.
1282 */
1283 if (!element_alloc(hashp, hctl->nelem_alloc, freelist_idx))
1284 {
1285 int borrow_from_idx;
1286
1287 if (!IS_PARTITIONED(hctl))
1288 return NULL; /* out of memory */
1289
1290 /* try to borrow element from another freelist */
1291 borrow_from_idx = freelist_idx;
1292 for (;;)
1293 {
1294 borrow_from_idx = (borrow_from_idx + 1) % NUM_FREELISTS;
1295 if (borrow_from_idx == freelist_idx)
1296 break; /* examined all freelists, fail */
1297
1298 SpinLockAcquire(&(hctl->freeList[borrow_from_idx].mutex));
1299 newElement = hctl->freeList[borrow_from_idx].freeList;
1300
1301 if (newElement != NULL)
1302 {
1303 hctl->freeList[borrow_from_idx].freeList = newElement->link;
1304 SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
1305
1306 /* careful: count the new element in its proper freelist */
1307 SpinLockAcquire(&hctl->freeList[freelist_idx].mutex);
1308 hctl->freeList[freelist_idx].nentries++;
1309 SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1310
1311 return newElement;
1312 }
1313
1314 SpinLockRelease(&(hctl->freeList[borrow_from_idx].mutex));
1315 }
1316
1317 /* no elements available to borrow either, so out of memory */
1318 return NULL;
1319 }
1320 }
1321
1322 /* remove entry from freelist, bump nentries */
1323 hctl->freeList[freelist_idx].freeList = newElement->link;
1324 hctl->freeList[freelist_idx].nentries++;
1325
1326 if (IS_PARTITIONED(hctl))
1327 SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1328
1329 return newElement;
1330}
static bool element_alloc(HTAB *hashp, int nelem, int freelist_idx)
Definition: dynahash.c:1701
#define NUM_FREELISTS
Definition: dynahash.c:128
int64 nentries
Definition: dynahash.c:156
int nelem_alloc
Definition: dynahash.c:197

References element_alloc(), FreeListData::freeList, HASHHDR::freeList, HTAB::hctl, IS_PARTITIONED, HASHELEMENT::link, FreeListData::mutex, HASHHDR::nelem_alloc, FreeListData::nentries, NUM_FREELISTS, SpinLockAcquire, and SpinLockRelease.

Referenced by hash_search_with_hash_value().

◆ get_hash_value()

uint32 get_hash_value ( HTAB hashp,
const void *  keyPtr 
)

Definition at line 908 of file dynahash.c.

909{
910 return hashp->hash(keyPtr, hashp->keysize);
911}
HashValueFunc hash
Definition: dynahash.c:225
Size keysize
Definition: dynahash.c:237

References HTAB::hash, and HTAB::keysize.

Referenced by BufTableHashCode(), LockTagHashCode(), and lookup_type_cache().

◆ has_seq_scans()

static bool has_seq_scans ( HTAB hashp)
static

Definition at line 1917 of file dynahash.c.

1918{
1919 int i;
1920
1921 for (i = 0; i < num_seq_scans; i++)
1922 {
1923 if (seq_scan_tables[i] == hashp)
1924 return true;
1925 }
1926 return false;
1927}

References i, num_seq_scans, and seq_scan_tables.

Referenced by hash_freeze(), and hash_search_with_hash_value().

◆ hash_corrupted()

static void hash_corrupted ( HTAB hashp)
static

Definition at line 1803 of file dynahash.c.

1804{
1805 /*
1806 * If the corruption is in a shared hashtable, we'd better force a
1807 * systemwide restart. Otherwise, just shut down this one backend.
1808 */
1809 if (hashp->isshared)
1810 elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
1811 else
1812 elog(FATAL, "hash table \"%s\" corrupted", hashp->tabname);
1813}
#define FATAL
Definition: elog.h:41
#define PANIC
Definition: elog.h:42

References elog, FATAL, HTAB::isshared, PANIC, and HTAB::tabname.

Referenced by hash_initial_lookup().

◆ hash_create()

HTAB * hash_create ( const char *  tabname,
int64  nelem,
const HASHCTL info,
int  flags 
)

Definition at line 358 of file dynahash.c.

359{
360 HTAB *hashp;
361 HASHHDR *hctl;
362
363 /*
364 * Hash tables now allocate space for key and data, but you have to say
365 * how much space to allocate.
366 */
367 Assert(flags & HASH_ELEM);
368 Assert(info->keysize > 0);
369 Assert(info->entrysize >= info->keysize);
370
371 /*
372 * For shared hash tables, we have a local hash header (HTAB struct) that
373 * we allocate in TopMemoryContext; all else is in shared memory.
374 *
375 * For non-shared hash tables, everything including the hash header is in
376 * a memory context created specially for the hash table --- this makes
377 * hash_destroy very simple. The memory context is made a child of either
378 * a context specified by the caller, or TopMemoryContext if nothing is
379 * specified.
380 */
381 if (flags & HASH_SHARED_MEM)
382 {
383 /* Set up to allocate the hash header */
384 CurrentDynaHashCxt = TopMemoryContext;
385 }
386 else
387 {
388 /* Create the hash table's private memory context */
389 if (flags & HASH_CONTEXT)
390 CurrentDynaHashCxt = info->hcxt;
391 else
392 CurrentDynaHashCxt = TopMemoryContext;
393 CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
394 "dynahash",
395 ALLOCSET_DEFAULT_SIZES);
396 }
397
398 /* Initialize the hash header, plus a copy of the table name */
399 hashp = (HTAB *) MemoryContextAlloc(CurrentDynaHashCxt,
400 sizeof(HTAB) + strlen(tabname) + 1);
401 MemSet(hashp, 0, sizeof(HTAB));
402
403 hashp->tabname = (char *) (hashp + 1);
404 strcpy(hashp->tabname, tabname);
405
406 /* If we have a private context, label it with hashtable's name */
407 if (!(flags & HASH_SHARED_MEM))
408 MemoryContextSetIdentifier(CurrentDynaHashCxt, hashp->tabname);
409
410 /*
411 * Select the appropriate hash function (see comments at head of file).
412 */
413 if (flags & HASH_FUNCTION)
414 {
415 Assert(!(flags & (HASH_BLOBS | HASH_STRINGS)));
416 hashp->hash = info->hash;
417 }
418 else if (flags & HASH_BLOBS)
419 {
420 Assert(!(flags & HASH_STRINGS));
421 /* We can optimize hashing for common key sizes */
422 if (info->keysize == sizeof(uint32))
423 hashp->hash = uint32_hash;
424 else
425 hashp->hash = tag_hash;
426 }
427 else
428 {
429 /*
430 * string_hash used to be considered the default hash method, and in a
431 * non-assert build it effectively still is. But we now consider it
432 * an assertion error to not say HASH_STRINGS explicitly. To help
433 * catch mistaken usage of HASH_STRINGS, we also insist on a
434 * reasonably long string length: if the keysize is only 4 or 8 bytes,
435 * it's almost certainly an integer or pointer not a string.
436 */
437 Assert(flags & HASH_STRINGS);
438 Assert(info->keysize > 8);
439
440 hashp->hash = string_hash;
441 }
442
443 /*
444 * If you don't specify a match function, it defaults to string_compare if
445 * you used string_hash, and to memcmp otherwise.
446 *
447 * Note: explicitly specifying string_hash is deprecated, because this
448 * might not work for callers in loadable modules on some platforms due to
449 * referencing a trampoline instead of the string_hash function proper.
450 * Specify HASH_STRINGS instead.
451 */
452 if (flags & HASH_COMPARE)
453 hashp->match = info->match;
454 else if (hashp->hash == string_hash)
455 hashp->match = (HashCompareFunc) string_compare;
456 else
457 hashp->match = memcmp;
458
459 /*
460 * Similarly, the key-copying function defaults to strlcpy or memcpy.
461 */
462 if (flags & HASH_KEYCOPY)
463 hashp->keycopy = info->keycopy;
464 else if (hashp->hash == string_hash)
465 {
466 /*
467 * The signature of keycopy is meant for memcpy(), which returns
468 * void*, but strlcpy() returns size_t. Since we never use the return
469 * value of keycopy, and size_t is pretty much always the same size as
470 * void *, this should be safe. The extra cast in the middle is to
471 * avoid warnings from -Wcast-function-type.
472 */
473 hashp->keycopy = (HashCopyFunc) (pg_funcptr_t) strlcpy;
474 }
475 else
476 hashp->keycopy = memcpy;
477
478 /* And select the entry allocation function, too. */
479 if (flags & HASH_ALLOC)
480 hashp->alloc = info->alloc;
481 else
482 hashp->alloc = DynaHashAlloc;
483
484 if (flags & HASH_SHARED_MEM)
485 {
486 /*
487 * ctl structure and directory are preallocated for shared memory
488 * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
489 * well.
490 */
491 hashp->hctl = info->hctl;
492 hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
493 hashp->hcxt = NULL;
494 hashp->isshared = true;
495
496 /* hash table already exists, we're just attaching to it */
497 if (flags & HASH_ATTACH)
498 {
499 /* make local copies of some heavily-used values */
500 hctl = hashp->hctl;
501 hashp->keysize = hctl->keysize;
502 hashp->ssize = hctl->ssize;
503 hashp->sshift = hctl->sshift;
504
505 return hashp;
506 }
507 }
508 else
509 {
510 /* setup hash table defaults */
511 hashp->hctl = NULL;
512 hashp->dir = NULL;
513 hashp->hcxt = CurrentDynaHashCxt;
514 hashp->isshared = false;
515 }
516
517 if (!hashp->hctl)
518 {
519 hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
520 if (!hashp->hctl)
521 ereport(ERROR,
522 (errcode(ERRCODE_OUT_OF_MEMORY),
523 errmsg("out of memory")));
524 }
525
526 hashp->frozen = false;
527
528 hdefault(hashp);
529
530 hctl = hashp->hctl;
531
532 if (flags & HASH_PARTITION)
533 {
534 /* Doesn't make sense to partition a local hash table */
535 Assert(flags & HASH_SHARED_MEM);
536
537 /*
538 * The number of partitions had better be a power of 2. Also, it must
539 * be less than INT_MAX (see init_htab()), so call the int version of
540 * next_pow2.
541 */
542 Assert(info->num_partitions == next_pow2_int(info->num_partitions));
543
544 hctl->num_partitions = info->num_partitions;
545 }
546
547 if (flags & HASH_SEGMENT)
548 {
549 hctl->ssize = info->ssize;
550 hctl->sshift = my_log2(info->ssize);
551 /* ssize had better be a power of 2 */
552 Assert(hctl->ssize == (1L << hctl->sshift));
553 }
554
555 /*
556 * SHM hash tables have fixed directory size passed by the caller.
557 */
558 if (flags & HASH_DIRSIZE)
559 {
560 hctl->max_dsize = info->max_dsize;
561 hctl->dsize = info->dsize;
562 }
563
564 /* remember the entry sizes, too */
565 hctl->keysize = info->keysize;
566 hctl->entrysize = info->entrysize;
567
568 /* make local copies of heavily-used constant fields */
569 hashp->keysize = hctl->keysize;
570 hashp->ssize = hctl->ssize;
571 hashp->sshift = hctl->sshift;
572
573 /* Build the hash directory structure */
574 if (!init_htab(hashp, nelem))
575 elog(ERROR, "failed to initialize hash table \"%s\"", hashp->tabname);
576
577 /*
578 * For a shared hash table, preallocate the requested number of elements.
579 * This reduces problems with run-time out-of-shared-memory conditions.
580 *
581 * For a non-shared hash table, preallocate the requested number of
582 * elements if it's less than our chosen nelem_alloc. This avoids wasting
583 * space if the caller correctly estimates a small table size.
584 */
585 if ((flags & HASH_SHARED_MEM) ||
586 nelem < hctl->nelem_alloc)
587 {
588 int i,
589 freelist_partitions,
590 nelem_alloc,
591 nelem_alloc_first;
592
593 /*
594 * If hash table is partitioned, give each freelist an equal share of
595 * the initial allocation. Otherwise only freeList[0] is used.
596 */
597 if (IS_PARTITIONED(hashp->hctl))
598 freelist_partitions = NUM_FREELISTS;
599 else
600 freelist_partitions = 1;
601
602 nelem_alloc = nelem / freelist_partitions;
603 if (nelem_alloc <= 0)
604 nelem_alloc = 1;
605
606 /*
607 * Make sure we'll allocate all the requested elements; freeList[0]
608 * gets the excess if the request isn't divisible by NUM_FREELISTS.
609 */
610 if (nelem_alloc * freelist_partitions < nelem)
611 nelem_alloc_first =
612 nelem - nelem_alloc * (freelist_partitions - 1);
613 else
614 nelem_alloc_first = nelem_alloc;
615
616 for (i = 0; i < freelist_partitions; i++)
617 {
618 int temp = (i == 0) ? nelem_alloc_first : nelem_alloc;
619
620 if (!element_alloc(hashp, temp, i))
621 ereport(ERROR,
622 (errcode(ERRCODE_OUT_OF_MEMORY),
623 errmsg("out of memory")));
624 }
625 }
626
627 /* Set isfixed if requested, but not till after we build initial entries */
628 if (flags & HASH_FIXED_SIZE)
629 hctl->isfixed = true;
630
631 return hashp;
632}
void(* pg_funcptr_t)(void)
Definition: c.h:461
static int next_pow2_int(int64 num)
Definition: dynahash.c:1839
static int string_compare(const char *key1, const char *key2, Size keysize)
Definition: dynahash.c:313
static void hdefault(HTAB *hashp)
Definition: dynahash.c:638
static int my_log2(int64 num)
Definition: dynahash.c:1817
static bool init_htab(HTAB *hashp, int64 nelem)
Definition: dynahash.c:700
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ereport(elevel,...)
Definition: elog.h:150
uint32 tag_hash(const void *key, Size keysize)
Definition: hashfn.c:677
uint32 uint32_hash(const void *key, Size keysize)
Definition: hashfn.c:688
uint32 string_hash(const void *key, Size keysize)
Definition: hashfn.c:660
#define HASH_KEYCOPY
Definition: hsearch.h:100
#define HASH_STRINGS
Definition: hsearch.h:96
int(* HashCompareFunc)(const void *key1, const void *key2, Size keysize)
Definition: hsearch.h:29
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_ALLOC
Definition: hsearch.h:101
#define HASH_DIRSIZE
Definition: hsearch.h:94
#define HASH_SEGMENT
Definition: hsearch.h:93
#define HASH_ATTACH
Definition: hsearch.h:104
#define HASH_COMPARE
Definition: hsearch.h:99
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HASH_SHARED_MEM
Definition: hsearch.h:103
#define HASH_FIXED_SIZE
Definition: hsearch.h:105
#define HASH_PARTITION
Definition: hsearch.h:92
void *(* HashCopyFunc)(void *dest, const void *src, Size keysize)
Definition: hsearch.h:37
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1229
MemoryContext TopMemoryContext
Definition: mcxt.c:166
void MemoryContextSetIdentifier(MemoryContext context, const char *id)
Definition: mcxt.c:658
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition: strlcpy.c:45
HashAllocFunc alloc
Definition: hsearch.h:84
Size keysize
Definition: hsearch.h:75
int64 max_dsize
Definition: hsearch.h:73
HashValueFunc hash
Definition: hsearch.h:78
Size entrysize
Definition: hsearch.h:76
HashCompareFunc match
Definition: hsearch.h:80
HASHHDR * hctl
Definition: hsearch.h:88
int64 ssize
Definition: hsearch.h:70
MemoryContext hcxt
Definition: hsearch.h:86
int64 num_partitions
Definition: hsearch.h:68
int64 dsize
Definition: hsearch.h:72
HashCopyFunc keycopy
Definition: hsearch.h:82
int64 ssize
Definition: dynahash.c:195
Size keysize
Definition: dynahash.c:191
int64 num_partitions
Definition: dynahash.c:193
int sshift
Definition: dynahash.c:196
Definition: dynahash.c:222
HashCompareFunc match
Definition: dynahash.c:226
HashCopyFunc keycopy
Definition: dynahash.c:227
bool frozen
Definition: dynahash.c:234

References HTAB::alloc, HASHCTL::alloc, ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, Assert(), CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, HASHCTL::dsize, DynaHashAlloc(), element_alloc(), elog, HASHHDR::entrysize, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HTAB::frozen, HTAB::hash, HASHCTL::hash, HASH_ALLOC, HASH_ATTACH, HASH_BLOBS, HASH_COMPARE, HASH_CONTEXT, HASH_DIRSIZE, HASH_ELEM, HASH_FIXED_SIZE, HASH_FUNCTION, HASH_KEYCOPY, HASH_PARTITION, HASH_SEGMENT, HASH_SHARED_MEM, HASH_STRINGS, HTAB::hctl, HASHCTL::hctl, HTAB::hcxt, HASHCTL::hcxt, hdefault(), i, init_htab(), IS_PARTITIONED, HASHHDR::isfixed, HTAB::isshared, HTAB::keycopy, HASHCTL::keycopy, HASHHDR::keysize, HTAB::keysize, HASHCTL::keysize, HTAB::match, HASHCTL::match, HASHHDR::max_dsize, HASHCTL::max_dsize, MemoryContextAlloc(), MemoryContextSetIdentifier(), MemSet, my_log2(), next_pow2_int(), NUM_FREELISTS, HASHHDR::num_partitions, HASHCTL::num_partitions, HASHHDR::sshift, HTAB::sshift, HASHHDR::ssize, HTAB::ssize, HASHCTL::ssize, string_compare(), string_hash(), strlcpy(), HTAB::tabname, tag_hash(), TopMemoryContext, and uint32_hash().

Referenced by _hash_finish_split(), _PG_init(), AddEventToPendingNotifies(), AddPendingSync(), assign_record_type_typmod(), begin_heap_rewrite(), build_colinfo_names_hash(), build_guc_variables(), build_join_rel_hash(), BuildEventTriggerCache(), cfunc_hashtable_init(), CheckForSessionAndXactLocks(), CompactCheckpointerRequestQueue(), compute_array_stats(), compute_tsvector_stats(), create_seq_hashtable(), createConnHash(), CreateLocalPredicateLockHash(), CreatePartitionDirectory(), do_autovacuum(), EnablePortalManager(), ExecInitModifyTable(), ExecuteTruncateGuts(), find_all_inheritors(), find_oper_cache_entry(), find_rendezvous_variable(), get_json_object_as_hash(), get_relation_notnullatts(), GetComboCommandId(), GetConnection(), gistInitBuildBuffers(), gistInitParentMap(), init_missing_cache(), init_procedure_caches(), init_rel_sync_cache(), init_timezone_hashtable(), init_ts_config_cache(), init_uncommitted_enum_types(), init_uncommitted_enum_values(), InitBufferManagerAccess(), InitializeAttoptCache(), InitializeRelfilenumberMap(), InitializeShippableCache(), InitializeTableSpaceCache(), InitLocalBuffers(), InitLockManagerAccess(), InitQueryHashTable(), InitRecoveryTransactionEnvironment(), InitSync(), json_unique_check_init(), load_categories_hash(), log_invalid_page(), logical_begin_heap_rewrite(), logicalrep_partmap_init(), logicalrep_relmap_init(), lookup_proof_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), pa_allocate_worker(), pg_get_backend_memory_contexts(), plpgsql_estate_setup(), PLy_add_exceptions(), populate_recordset_object_start(), process_syncing_tables_for_apply(), rebuild_database_list(), record_C_func(), RegisterExtensibleNodeEntry(), RelationCacheInitialize(), ReorderBufferAllocate(), ReorderBufferBuildTupleCidHash(), ReorderBufferToastInitHash(), ResetUnloggedRelationsInDbspaceDir(), ri_InitHashTables(), select_perl_context(), SerializePendingSyncs(), set_rtable_names(), ShmemInitHash(), smgropen(), transformGraph(), and XLogPrefetcherAllocate().
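
A hedged usage sketch for a backend-local table keyed by an Oid. MyEntry, my_cache and init_my_cache are hypothetical names; the entry struct's first field must hold the key, since dynahash copies the key into the start of each entry.

#include "postgres.h"
#include "utils/hsearch.h"

typedef struct MyEntry
{
    Oid         relid;          /* hash key -- must be the first field */
    int         counter;        /* caller-managed payload */
} MyEntry;

static HTAB *my_cache = NULL;

static void
init_my_cache(void)
{
    HASHCTL     ctl;

    ctl.keysize = sizeof(Oid);
    ctl.entrysize = sizeof(MyEntry);
    ctl.hcxt = CurrentMemoryContext;    /* parent of the table's private context */

    my_cache = hash_create("my hypothetical cache",     /* name used in messages */
                           128,         /* initial size estimate, not a hard limit */
                           &ctl,
                           HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}

Because HASH_BLOBS is given with keysize equal to sizeof(uint32), the table uses uint32_hash; because HASH_CONTEXT is given, the whole table lives in its own child context of ctl.hcxt, so a later hash_destroy(my_cache) frees everything at once.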

◆ hash_destroy()

void hash_destroy ( HTAB hashp)

Definition at line 865 of file dynahash.c.

866{
867 if (hashp != NULL)
868 {
869 /* allocation method must be one we know how to free, too */
870 Assert(hashp->alloc == DynaHashAlloc);
871 /* so this hashtable must have its own context */
872 Assert(hashp->hcxt != NULL);
873
874 hash_stats(__func__, hashp);
875
876 /*
877 * Free everything by destroying the hash table's memory context.
878 */
879 MemoryContextDelete(hashp->hcxt);
880 }
881}
void hash_stats(const char *caller, HTAB *hashp)
Definition: dynahash.c:884
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:469

References HTAB::alloc, Assert(), DynaHashAlloc(), hash_stats(), HTAB::hcxt, and MemoryContextDelete().

Referenced by _hash_finish_split(), CheckForSessionAndXactLocks(), CompactCheckpointerRequestQueue(), destroy_colinfo_names_hash(), do_autovacuum(), ExecuteTruncateGuts(), find_all_inheritors(), get_json_object_as_hash(), pg_get_backend_memory_contexts(), pgoutput_shutdown(), populate_recordset_object_end(), PostPrepare_PredicateLocks(), process_syncing_tables_for_apply(), ReleasePredicateLocksLocal(), ReorderBufferFreeTXN(), ReorderBufferToastReset(), ReorderBufferTruncateTXN(), ResetSequenceCaches(), ResetUnloggedRelationsInDbspaceDir(), SerializePendingSyncs(), set_rtable_names(), ShutdownRecoveryTransactionEnvironment(), XLogCheckInvalidPages(), and XLogPrefetcherFree().

◆ hash_estimate_size()

Size hash_estimate_size ( int64  num_entries,
Size  entrysize 
)

Definition at line 783 of file dynahash.c.

784{
785 Size size;
786 int64 nBuckets,
787 nSegments,
788 nDirEntries,
789 nElementAllocs,
790 elementSize,
791 elementAllocCnt;
792
793 /* estimate number of buckets wanted */
794 nBuckets = next_pow2_int64(num_entries);
795 /* # of segments needed for nBuckets */
796 nSegments = next_pow2_int64((nBuckets - 1) / DEF_SEGSIZE + 1);
797 /* directory entries */
798 nDirEntries = DEF_DIRSIZE;
799 while (nDirEntries < nSegments)
800 nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
801
802 /* fixed control info */
803 size = MAXALIGN(sizeof(HASHHDR)); /* but not HTAB, per above */
804 /* directory */
805 size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
806 /* segments */
807 size = add_size(size, mul_size(nSegments,
808 MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
809 /* elements --- allocated in groups of choose_nelem_alloc() entries */
810 elementAllocCnt = choose_nelem_alloc(entrysize);
811 nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
812 elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
813 size = add_size(size,
814 mul_size(nElementAllocs,
815 mul_size(elementAllocCnt, elementSize)));
816
817 return size;
818}
#define DEF_DIRSIZE
Definition: dynahash.c:125
static int choose_nelem_alloc(Size entrysize)
Definition: dynahash.c:667
#define DEF_SEGSIZE
Definition: dynahash.c:123
static int64 next_pow2_int64(int64 num)
Definition: dynahash.c:1831
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510

References add_size(), choose_nelem_alloc(), DEF_DIRSIZE, DEF_SEGSIZE, MAXALIGN, mul_size(), and next_pow2_int64().

Referenced by BufTableShmemSize(), CalculateShmemSize(), LockManagerShmemSize(), pgss_memsize(), PredicateLockShmemSize(), and WaitEventCustomShmemSize().
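
A hedged sketch of the usual shared-memory sizing pattern: a subsystem or extension sums the estimate into its shmem request, typically via add_size(). WidgetEntry and widget_shmem_size are hypothetical names.

typedef struct WidgetEntry
{
    uint32      key;
    int         value;
} WidgetEntry;

static Size
widget_shmem_size(int max_widgets)
{
    Size        size = 0;

    /* room for the table's header, directory, segments and entries */
    size = add_size(size, hash_estimate_size(max_widgets,
                                             sizeof(WidgetEntry)));
    return size;
}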

◆ hash_freeze()

void hash_freeze ( HTAB hashp)

Definition at line 1529 of file dynahash.c.

1530{
1531 if (hashp->isshared)
1532 elog(ERROR, "cannot freeze shared hashtable \"%s\"", hashp->tabname);
1533 if (!hashp->frozen && has_seq_scans(hashp))
1534 elog(ERROR, "cannot freeze hashtable \"%s\" because it has active scans",
1535 hashp->tabname);
1536 hashp->frozen = true;
1537}
static bool has_seq_scans(HTAB *hashp)
Definition: dynahash.c:1917

References elog, ERROR, HTAB::frozen, has_seq_scans(), HTAB::isshared, and HTAB::tabname.

◆ hash_get_num_entries()

int64 hash_get_num_entries ( HTAB hashp)

Definition at line 1336 of file dynahash.c.

1337{
1338 int i;
1339 int64 sum = hashp->hctl->freeList[0].nentries;
1340
1341 /*
1342 * We currently don't bother with acquiring the mutexes; it's only
1343 * sensible to call this function if you've got lock on all partitions of
1344 * the table.
1345 */
1346 if (IS_PARTITIONED(hashp->hctl))
1347 {
1348 for (i = 1; i < NUM_FREELISTS; i++)
1349 sum += hashp->hctl->freeList[i].nentries;
1350 }
1351
1352 return sum;
1353}

References HASHHDR::freeList, HTAB::hctl, i, IS_PARTITIONED, FreeListData::nentries, and NUM_FREELISTS.

Referenced by build_guc_variables(), compute_array_stats(), compute_tsvector_stats(), entry_alloc(), entry_dealloc(), entry_reset(), EstimatePendingSyncsSpace(), EstimateUncommittedEnumsSpace(), get_crosstab_tuplestore(), get_explain_guc_options(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventCustomNames(), hash_stats(), pgss_shmem_shutdown(), ResetUnloggedRelationsInDbspaceDir(), SerializePendingSyncs(), transformGraph(), and XLogHaveInvalidPages().

◆ hash_get_shared_size()

Size hash_get_shared_size ( HASHCTL info,
int  flags 
)

Definition at line 854 of file dynahash.c.

855{
856 Assert(flags & HASH_DIRSIZE);
857 Assert(info->dsize == info->max_dsize);
858 return sizeof(HASHHDR) + info->dsize * sizeof(HASHSEGMENT);
859}
struct HASHHDR HASHHDR
Definition: hsearch.h:58

References Assert(), HASHCTL::dsize, HASH_DIRSIZE, and HASHCTL::max_dsize.

Referenced by ShmemInitHash().

◆ hash_initial_lookup()

static uint32 hash_initial_lookup ( HTAB hashp,
uint32  hashvalue,
HASHBUCKET **  bucketptr 
)
inlinestatic

Definition at line 1779 of file dynahash.c.

1780{
1781 HASHHDR *hctl = hashp->hctl;
1782 HASHSEGMENT segp;
1783 int64 segment_num;
1784 int64 segment_ndx;
1785 uint32 bucket;
1786
1787 bucket = calc_bucket(hctl, hashvalue);
1788
1789 segment_num = bucket >> hashp->sshift;
1790 segment_ndx = MOD(bucket, hashp->ssize);
1791
1792 segp = hashp->dir[segment_num];
1793
1794 if (segp == NULL)
1795 hash_corrupted(hashp);
1796
1797 *bucketptr = &segp[segment_ndx];
1798 return bucket;
1799}
static pg_noreturn void hash_corrupted(HTAB *hashp)
Definition: dynahash.c:1803

References calc_bucket(), HTAB::dir, hash_corrupted(), HTAB::hctl, MOD, HTAB::sshift, and HTAB::ssize.

Referenced by hash_search_with_hash_value(), hash_seq_init_with_hash_value(), and hash_update_hash_key().

◆ hash_search()

void * hash_search ( HTAB hashp,
const void *  keyPtr,
HASHACTION  action,
bool *  foundPtr 
)

Definition at line 952 of file dynahash.c.

956{
957 return hash_search_with_hash_value(hashp,
958 keyPtr,
959 hashp->hash(keyPtr, hashp->keysize),
960 action,
961 foundPtr);
962}
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:965

References generate_unaccent_rules::action, HTAB::hash, hash_search_with_hash_value(), and HTAB::keysize.

Referenced by _hash_finish_split(), _hash_splitbucket(), add_guc_variable(), add_join_rel(), add_to_names_hash(), AddEnumLabel(), AddEventToPendingNotifies(), AddPendingSync(), ApplyLogicalMappingFile(), assign_record_type_typmod(), AsyncExistsPendingNotify(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), build_guc_variables(), build_join_rel_hash(), BuildEventTriggerCache(), cfunc_hashtable_delete(), cfunc_hashtable_insert(), cfunc_hashtable_lookup(), CheckAndPromotePredicateLockRequest(), CheckForSerializableConflictOut(), CheckForSessionAndXactLocks(), colname_is_unique(), CompactCheckpointerRequestQueue(), compile_plperl_function(), compile_pltcl_function(), compute_array_stats(), compute_tsvector_stats(), createNewConnection(), define_custom_variable(), delete_rel_type_cache_if_needed(), deleteConnection(), do_autovacuum(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), DropPreparedStatement(), entry_alloc(), entry_dealloc(), entry_reset(), EnumTypeUncommitted(), EnumUncommitted(), EnumValuesCreate(), EventCacheLookup(), ExecInitModifyTable(), ExecLookupResultRelByOid(), ExecuteTruncateGuts(), ExtendBufferedRelLocal(), FetchPreparedStatement(), finalize_in_progress_typentries(), find_all_inheritors(), find_join_rel(), find_oper_cache_entry(), find_option(), find_relation_notnullatts(), find_rendezvous_variable(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPrivateRefCountEntry(), get_attribute_options(), get_cast_hashentry(), get_rel_sync_entry(), get_relation_notnullatts(), get_tablespace(), GetComboCommandId(), GetConnection(), getConnectionByName(), GetExtensibleNodeEntry(), getmissingattr(), GetPrivateRefCountEntry(), getState(), GetWaitEventCustomIdentifier(), gistGetNodeBuffer(), gistGetParent(), gistMemorizeParent(), gistRelocateBuildBuffersOnSplit(), hash_object_field_end(), init_sequence(), insert_rel_type_cache_if_needed(), InvalidateAttoptCacheCallback(), InvalidateLocalBuffer(), InvalidateOprCacheCallBack(), InvalidateShippableCacheCallback(), InvalidateTableSpaceCacheCallback(), is_shippable(), JsObjectGetField(), json_unique_check_key(), LocalBufferAlloc(), LockAcquireExtended(), LockHasWaiters(), LockHeldByMe(), LockRelease(), log_invalid_page(), logical_rewrite_log_mapping(), logicalrep_partition_open(), logicalrep_rel_open(), logicalrep_relmap_update(), lookup_C_func(), lookup_proof_cache(), lookup_ts_config_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), make_oper_cache_entry(), MarkGUCPrefixReserved(), pa_allocate_worker(), pa_find_worker(), pa_free_worker(), PartitionDirectoryLookup(), pg_get_backend_memory_contexts(), pg_tzset(), pgss_store(), plperl_spi_exec_prepared(), plperl_spi_freeplan(), plperl_spi_prepare(), plperl_spi_query_prepared(), pltcl_fetch_interp(), PLy_commit(), PLy_generate_spi_exceptions(), PLy_procedure_get(), PLy_rollback(), PLy_spi_subtransaction_abort(), populate_recordset_object_field_end(), predicatelock_twophase_recover(), PredicateLockExists(), PredicateLockShmemInit(), PredicateLockTwoPhaseFinish(), PrefetchLocalBuffer(), process_syncing_tables_for_apply(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), PutMemoryContextsStatsTupleStore(), rebuild_database_list(), record_C_func(), RegisterExtensibleNodeEntry(), RegisterPredicateLockingXid(), rel_sync_cache_relation_cb(), RelationPreTruncate(), ReleaseOneSerializableXact(), RelFileLocatorSkippingWAL(), RelfilenumberMapInvalidateCallback(), RelidByRelfilenumber(), 
RememberSyncRequest(), RemoveLocalLock(), ReorderBufferBuildTupleCidHash(), ReorderBufferCleanupTXN(), ReorderBufferToastAppendChunk(), ReorderBufferToastReplace(), ReorderBufferTXNByXid(), ReservePrivateRefCountEntry(), ResetUnloggedRelationsInDbspaceDir(), ResolveCminCmaxDuringDecoding(), RestoreUncommittedEnums(), rewrite_heap_dead_tuple(), rewrite_heap_tuple(), ri_FetchPreparedPlan(), ri_HashCompareOp(), ri_HashPreparedPlan(), ri_LoadConstraintInfo(), select_perl_context(), SerializePendingSyncs(), set_rtable_names(), ShmemInitStruct(), smgrdestroy(), smgrDoPendingSyncs(), smgropen(), smgrreleaserellocator(), StandbyAcquireAccessExclusiveLock(), StandbyReleaseAllLocks(), StandbyReleaseLocks(), StandbyReleaseOldLocks(), StandbyReleaseXidEntryLocks(), StorePreparedStatement(), table_recheck_autovac(), TypeCacheRelCallback(), WaitEventCustomNew(), XLogPrefetcherAddFilter(), XLogPrefetcherCompleteFilters(), and XLogPrefetcherIsFiltered().
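
A hedged lookup-or-insert sketch using the hypothetical my_cache table from the hash_create() example above. With HASH_ENTER, hash_search() copies the key into a new entry when none exists and the caller fills in the rest; with HASH_FIND it returns NULL when the key is absent; with HASH_REMOVE it unlinks the entry and returns it, after which the entry may be recycled at any time.

static MyEntry *
lookup_or_create(Oid relid)
{
    MyEntry    *entry;
    bool        found;

    entry = (MyEntry *) hash_search(my_cache, &relid, HASH_ENTER, &found);
    if (!found)
    {
        /* new entry: the key was copied for us, initialize the payload */
        entry->counter = 0;
    }
    return entry;
}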

◆ hash_search_with_hash_value()

void * hash_search_with_hash_value ( HTAB hashp,
const void *  keyPtr,
uint32  hashvalue,
HASHACTION  action,
bool *  foundPtr 
)

Definition at line 965 of file dynahash.c.

970{
971 HASHHDR *hctl = hashp->hctl;
972 int freelist_idx = FREELIST_IDX(hctl, hashvalue);
973 Size keysize;
974 HASHBUCKET currBucket;
975 HASHBUCKET *prevBucketPtr;
976 HashCompareFunc match;
977
978#ifdef HASH_STATISTICS
979 hctl->accesses++;
980#endif
981
982 /*
983 * If inserting, check if it is time to split a bucket.
984 *
985 * NOTE: failure to expand table is not a fatal error, it just means we
986 * have to run at higher fill factor than we wanted. However, if we're
987 * using the palloc allocator then it will throw error anyway on
988 * out-of-memory, so we must do this before modifying the table.
989 */
990 if (action == HASH_ENTER || action == HASH_ENTER_NULL)
991 {
992 /*
993 * Can't split if running in partitioned mode, nor if frozen, nor if
994 * table is the subject of any active hash_seq_search scans.
995 */
996 if (hctl->freeList[0].nentries > (int64) hctl->max_bucket &&
997 !IS_PARTITIONED(hctl) && !hashp->frozen &&
998 !has_seq_scans(hashp))
999 (void) expand_table(hashp);
1000 }
1001
1002 /*
1003 * Do the initial lookup
1004 */
1005 (void) hash_initial_lookup(hashp, hashvalue, &prevBucketPtr);
1006 currBucket = *prevBucketPtr;
1007
1008 /*
1009 * Follow collision chain looking for matching key
1010 */
1011 match = hashp->match; /* save one fetch in inner loop */
1012 keysize = hashp->keysize; /* ditto */
1013
1014 while (currBucket != NULL)
1015 {
1016 if (currBucket->hashvalue == hashvalue &&
1017 match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
1018 break;
1019 prevBucketPtr = &(currBucket->link);
1020 currBucket = *prevBucketPtr;
1021#ifdef HASH_STATISTICS
1022 hctl->collisions++;
1023#endif
1024 }
1025
1026 if (foundPtr)
1027 *foundPtr = (bool) (currBucket != NULL);
1028
1029 /*
1030 * OK, now what?
1031 */
1032 switch (action)
1033 {
1034 case HASH_FIND:
1035 if (currBucket != NULL)
1036 return ELEMENTKEY(currBucket);
1037 return NULL;
1038
1039 case HASH_REMOVE:
1040 if (currBucket != NULL)
1041 {
1042 /* if partitioned, must lock to touch nentries and freeList */
1043 if (IS_PARTITIONED(hctl))
1044 SpinLockAcquire(&(hctl->freeList[freelist_idx].mutex));
1045
1046 /* delete the record from the appropriate nentries counter. */
1047 Assert(hctl->freeList[freelist_idx].nentries > 0);
1048 hctl->freeList[freelist_idx].nentries--;
1049
1050 /* remove record from hash bucket's chain. */
1051 *prevBucketPtr = currBucket->link;
1052
1053 /* add the record to the appropriate freelist. */
1054 currBucket->link = hctl->freeList[freelist_idx].freeList;
1055 hctl->freeList[freelist_idx].freeList = currBucket;
1056
1057 if (IS_PARTITIONED(hctl))
1058 SpinLockRelease(&hctl->freeList[freelist_idx].mutex);
1059
1060 /*
1061 * better hope the caller is synchronizing access to this
1062 * element, because someone else is going to reuse it the next
1063 * time something is added to the table
1064 */
1065 return ELEMENTKEY(currBucket);
1066 }
1067 return NULL;
1068
1069 case HASH_ENTER:
1070 case HASH_ENTER_NULL:
1071 /* Return existing element if found, else create one */
1072 if (currBucket != NULL)
1073 return ELEMENTKEY(currBucket);
1074
1075 /* disallow inserts if frozen */
1076 if (hashp->frozen)
1077 elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
1078 hashp->tabname);
1079
1080 currBucket = get_hash_entry(hashp, freelist_idx);
1081 if (currBucket == NULL)
1082 {
1083 /* out of memory */
1084 if (action == HASH_ENTER_NULL)
1085 return NULL;
1086 /* report a generic message */
1087 if (hashp->isshared)
1088 ereport(ERROR,
1089 (errcode(ERRCODE_OUT_OF_MEMORY),
1090 errmsg("out of shared memory")));
1091 else
1092 ereport(ERROR,
1093 (errcode(ERRCODE_OUT_OF_MEMORY),
1094 errmsg("out of memory")));
1095 }
1096
1097 /* link into hashbucket chain */
1098 *prevBucketPtr = currBucket;
1099 currBucket->link = NULL;
1100
1101 /* copy key into record */
1102 currBucket->hashvalue = hashvalue;
1103 hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);
1104
1105 /*
1106 * Caller is expected to fill the data field on return. DO NOT
1107 * insert any code that could possibly throw error here, as doing
1108 * so would leave the table entry incomplete and hence corrupt the
1109 * caller's data structure.
1110 */
1111
1112 return ELEMENTKEY(currBucket);
1113 }
1114
1115 elog(ERROR, "unrecognized hash action code: %d", (int) action);
1116
1117 return NULL; /* keep compiler quiet */
1118}
static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx)
Definition: dynahash.c:1251
static bool expand_table(HTAB *hashp)
Definition: dynahash.c:1546
#define ELEMENTKEY(helem)
Definition: dynahash.c:255
#define FREELIST_IDX(hctl, hashcode)
Definition: dynahash.c:214
static uint32 hash_initial_lookup(HTAB *hashp, uint32 hashvalue, HASHBUCKET **bucketptr)
Definition: dynahash.c:1779
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
@ HASH_ENTER_NULL
Definition: hsearch.h:116

References generate_unaccent_rules::action, Assert(), ELEMENTKEY, elog, ereport, errcode(), errmsg(), ERROR, expand_table(), FreeListData::freeList, HASHHDR::freeList, FREELIST_IDX, HTAB::frozen, get_hash_entry(), has_seq_scans(), HASH_ENTER, HASH_ENTER_NULL, HASH_FIND, hash_initial_lookup(), HASH_REMOVE, HASHELEMENT::hashvalue, HTAB::hctl, IS_PARTITIONED, HTAB::isshared, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, HASHHDR::max_bucket, FreeListData::mutex, FreeListData::nentries, SpinLockAcquire, SpinLockRelease, and HTAB::tabname.

Referenced by BufTableDelete(), BufTableInsert(), BufTableLookup(), CheckTargetForConflictsIn(), CleanUpLock(), ClearOldPredicateLocks(), CreatePredicateLock(), DecrementParentLocks(), DeleteChildTargetLocks(), DeleteLockTarget(), DropAllPredicateLocksFromTable(), FastPathGetRelationLockEntry(), GetLockConflicts(), hash_search(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockRelease(), LockWaiterCount(), PageIsPredicateLocked(), PredicateLockAcquire(), ReleaseOneSerializableXact(), RemoveScratchTarget(), RemoveTargetIfNoLongerUsed(), RestoreScratchTarget(), SetupLockInTable(), and TransferPredicateLocksToNewTarget().
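
For a partitioned shared table, the usual pattern is to compute the hash once with get_hash_value(), use it to choose and take the partition lock, and then pass it to hash_search_with_hash_value() so it is not recomputed. A hedged sketch; shared_lookup and my_partition_lock() are hypothetical stand-ins for a real table's lookup routine and partition-lock mapping.

#include "postgres.h"
#include "storage/lwlock.h"
#include "utils/hsearch.h"

/* hypothetical: maps a hash code to the LWLock protecting its partition */
extern LWLock *my_partition_lock(uint32 hashcode);

static void *
shared_lookup(HTAB *shared_hash, const void *key)
{
    uint32      hashcode = get_hash_value(shared_hash, key);
    LWLock     *partitionLock = my_partition_lock(hashcode);
    void       *result;

    LWLockAcquire(partitionLock, LW_SHARED);
    result = hash_search_with_hash_value(shared_hash, key, hashcode,
                                         HASH_FIND, NULL);
    LWLockRelease(partitionLock);

    return result;
}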

◆ hash_select_dirsize()

int64 hash_select_dirsize ( int64  num_entries)

Definition at line 830 of file dynahash.c.

831{
832 int64 nBuckets,
833 nSegments,
834 nDirEntries;
835
836 /* estimate number of buckets wanted */
837 nBuckets = next_pow2_int64(num_entries);
838 /* # of segments needed for nBuckets */
839 nSegments = next_pow2_int64((nBuckets - 1) / DEF_SEGSIZE + 1);
840 /* directory entries */
841 nDirEntries = DEF_DIRSIZE;
842 while (nDirEntries < nSegments)
843 nDirEntries <<= 1; /* dir_alloc doubles dsize at each call */
844
845 return nDirEntries;
846}

References DEF_DIRSIZE, DEF_SEGSIZE, and next_pow2_int64().

Referenced by ShmemInitHash().

◆ hash_seq_init()

void hash_seq_init ( HASH_SEQ_STATUS status,
HTAB hashp 
)

Definition at line 1380 of file dynahash.c.

1381{
1382 status->hashp = hashp;
1383 status->curBucket = 0;
1384 status->curEntry = NULL;
1385 status->hasHashvalue = false;
1386 if (!hashp->frozen)
1387 register_seq_scan(hashp);
1388}
static void register_seq_scan(HTAB *hashp)
Definition: dynahash.c:1884
HASHELEMENT * curEntry
Definition: hsearch.h:124
uint32 curBucket
Definition: hsearch.h:123
HTAB * hashp
Definition: hsearch.h:122
bool hasHashvalue
Definition: hsearch.h:125

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::frozen, HASH_SEQ_STATUS::hasHashvalue, HASH_SEQ_STATUS::hashp, and register_seq_scan().

Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), BeginReportingGUCOptions(), CheckForBufferLeaks(), CheckForSessionAndXactLocks(), CheckTableForSerializableConflictIn(), cleanup_rel_sync_cache(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DestroyPartitionDirectory(), disconnect_cached_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), ExecuteTruncateGuts(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPortalSnapshots(), gc_qtexts(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventCustomNames(), hash_seq_init_with_hash_value(), HoldPinnedPortals(), InitializeGUCOptions(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateShippableCacheCallback(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), logical_end_heap_rewrite(), logical_heap_rewrite_flush_mappings(), logicalrep_partmap_invalidate_cb(), logicalrep_partmap_reset_relmap(), logicalrep_relmap_invalidate_cb(), MarkGUCPrefixReserved(), packGraph(), pg_cursor(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_prepared_statement(), pg_stat_statements_internal(), pgfdw_inval_callback(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), plperl_fini(), PortalErrorCleanup(), PortalHashTableDeleteAll(), postgres_fdw_get_connections_internal(), PostPrepare_Locks(), PreCommit_Portals(), ProcessConfigFileInternal(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), rel_sync_cache_publication_cb(), rel_sync_cache_relation_cb(), RelationCacheInitializePhase3(), RelationCacheInvalidate(), RelfilenumberMapInvalidateCallback(), RememberSyncRequest(), ReorderBufferToastReset(), selectColorTrigrams(), SerializePendingSyncs(), SerializeUncommittedEnums(), smgrDoPendingSyncs(), smgrreleaseall(), StandbyReleaseAllLocks(), StandbyReleaseOldLocks(), ThereAreNoReadyPortals(), TypeCacheOpcCallback(), TypeCacheRelCallback(), TypeCacheTypCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().
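
A hedged scan sketch over the hypothetical my_cache table from the examples above. A scan must either run to completion (hash_seq_search() returning NULL terminates it implicitly) or be ended explicitly with hash_seq_term(); otherwise AtEOXact_HashTables() warns about a leaked scan at commit. Deleting the entry just returned by hash_seq_search() is allowed during the scan.

static void
prune_my_cache(void)
{
    HASH_SEQ_STATUS status;
    MyEntry    *entry;

    hash_seq_init(&status, my_cache);
    while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
    {
        if (entry->counter == 0)
            hash_search(my_cache, &entry->relid, HASH_REMOVE, NULL);
    }
    /* scan ran to completion, so no hash_seq_term() is needed */
}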

◆ hash_seq_init_with_hash_value()

void hash_seq_init_with_hash_value ( HASH_SEQ_STATUS status,
HTAB hashp,
uint32  hashvalue 
)

Definition at line 1400 of file dynahash.c.

1402{
1403 HASHBUCKET *bucketPtr;
1404
1405 hash_seq_init(status, hashp);
1406
1407 status->hasHashvalue = true;
1408 status->hashvalue = hashvalue;
1409
1410 status->curBucket = hash_initial_lookup(hashp, hashvalue, &bucketPtr);
1411 status->curEntry = *bucketPtr;
1412}
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1380
uint32 hashvalue
Definition: hsearch.h:126

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, hash_initial_lookup(), hash_seq_init(), HASH_SEQ_STATUS::hasHashvalue, and HASH_SEQ_STATUS::hashvalue.

Referenced by InvalidateAttoptCacheCallback(), and TypeCacheTypCallback().
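
A sketch of a hash-value-restricted scan, reusing the hypothetical MyEntry/my_hash_table names from the hash_seq_init() example and assuming the table was created with Oid keys. A matching hash value does not guarantee a matching key, so callers that care must still compare keys:

HASH_SEQ_STATUS status;
MyEntry    *entry;
Oid         key = 1234;             /* hypothetical key value */
uint32      hashvalue = get_hash_value(my_hash_table, &key);

hash_seq_init_with_hash_value(&status, my_hash_table, hashvalue);
while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
{
    if (entry->key == key)
        entry->counter = 0;         /* hypothetical per-entry action */
}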

◆ hash_seq_search()

void * hash_seq_search ( HASH_SEQ_STATUS status)

Definition at line 1415 of file dynahash.c.

1416{
1417 HTAB *hashp;
1418 HASHHDR *hctl;
1419 uint32 max_bucket;
1420 int64 ssize;
1421 int64 segment_num;
1422 int64 segment_ndx;
1423 HASHSEGMENT segp;
1424 uint32 curBucket;
1425 HASHELEMENT *curElem;
1426
1427 if (status->hasHashvalue)
1428 {
1429 /*
1430 * Scan entries only in the current bucket because only this bucket
1431 * can contain entries with the given hash value.
1432 */
1433 while ((curElem = status->curEntry) != NULL)
1434 {
1435 status->curEntry = curElem->link;
1436 if (status->hashvalue != curElem->hashvalue)
1437 continue;
1438 return (void *) ELEMENTKEY(curElem);
1439 }
1440
1441 hash_seq_term(status);
1442 return NULL;
1443 }
1444
1445 if ((curElem = status->curEntry) != NULL)
1446 {
1447 /* Continuing scan of curBucket... */
1448 status->curEntry = curElem->link;
1449 if (status->curEntry == NULL) /* end of this bucket */
1450 ++status->curBucket;
1451 return ELEMENTKEY(curElem);
1452 }
1453
1454 /*
1455 * Search for next nonempty bucket starting at curBucket.
1456 */
1457 curBucket = status->curBucket;
1458 hashp = status->hashp;
1459 hctl = hashp->hctl;
1460 ssize = hashp->ssize;
1461 max_bucket = hctl->max_bucket;
1462
1463 if (curBucket > max_bucket)
1464 {
1465 hash_seq_term(status);
1466 return NULL; /* search is done */
1467 }
1468
1469 /*
1470 * first find the right segment in the table directory.
1471 */
1472 segment_num = curBucket >> hashp->sshift;
1473 segment_ndx = MOD(curBucket, ssize);
1474
1475 segp = hashp->dir[segment_num];
1476
1477 /*
1478 * Pick up the first item in this bucket's chain. If chain is not empty
1479 * we can begin searching it. Otherwise we have to advance to find the
1480 * next nonempty bucket. We try to optimize that case since searching a
1481 * near-empty hashtable has to iterate this loop a lot.
1482 */
1483 while ((curElem = segp[segment_ndx]) == NULL)
1484 {
1485 /* empty bucket, advance to next */
1486 if (++curBucket > max_bucket)
1487 {
1488 status->curBucket = curBucket;
1489 hash_seq_term(status);
1490 return NULL; /* search is done */
1491 }
1492 if (++segment_ndx >= ssize)
1493 {
1494 segment_num++;
1495 segment_ndx = 0;
1496 segp = hashp->dir[segment_num];
1497 }
1498 }
1499
1500 /* Begin scan of curBucket... */
1501 status->curEntry = curElem->link;
1502 if (status->curEntry == NULL) /* end of this bucket */
1503 ++curBucket;
1504 status->curBucket = curBucket;
1505 return ELEMENTKEY(curElem);
1506}
void hash_seq_term(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1509

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::dir, ELEMENTKEY, hash_seq_term(), HASH_SEQ_STATUS::hasHashvalue, HASH_SEQ_STATUS::hashp, HASHELEMENT::hashvalue, HASH_SEQ_STATUS::hashvalue, HTAB::hctl, HASHELEMENT::link, HASHHDR::max_bucket, MOD, HTAB::sshift, and HTAB::ssize.

Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), BeginReportingGUCOptions(), CheckForBufferLeaks(), CheckForSessionAndXactLocks(), CheckTableForSerializableConflictIn(), cleanup_rel_sync_cache(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DestroyPartitionDirectory(), disconnect_cached_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), ExecuteTruncateGuts(), forget_invalid_pages(), forget_invalid_pages_db(), ForgetPortalSnapshots(), gc_qtexts(), get_guc_variables(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), GetWaitEventCustomNames(), HoldPinnedPortals(), InitializeGUCOptions(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateShippableCacheCallback(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), logical_end_heap_rewrite(), logical_heap_rewrite_flush_mappings(), logicalrep_partmap_invalidate_cb(), logicalrep_partmap_reset_relmap(), logicalrep_relmap_invalidate_cb(), MarkGUCPrefixReserved(), packGraph(), pg_cursor(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_prepared_statement(), pg_stat_statements_internal(), pgfdw_inval_callback(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), plperl_fini(), PortalErrorCleanup(), PortalHashTableDeleteAll(), postgres_fdw_get_connections_internal(), PostPrepare_Locks(), PreCommit_Portals(), ProcessConfigFileInternal(), ProcessSyncRequests(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), rel_sync_cache_publication_cb(), rel_sync_cache_relation_cb(), RelationCacheInitializePhase3(), RelationCacheInvalidate(), RelfilenumberMapInvalidateCallback(), RememberSyncRequest(), ReorderBufferToastReset(), selectColorTrigrams(), SerializePendingSyncs(), SerializeUncommittedEnums(), smgrDoPendingSyncs(), smgrreleaseall(), StandbyReleaseAllLocks(), StandbyReleaseOldLocks(), ThereAreNoReadyPortals(), TypeCacheOpcCallback(), TypeCacheRelCallback(), TypeCacheTypCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().
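
When a scan is abandoned before hash_seq_search() returns NULL, the caller must end it explicitly so its scan slot is released; a sketch, again using the hypothetical names from the hash_seq_init() example:

HASH_SEQ_STATUS status;
MyEntry    *entry;

hash_seq_init(&status, my_hash_table);
while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
{
    if (entry->counter > 100)        /* hypothetical stop condition */
    {
        hash_seq_term(&status);      /* required for an incomplete scan */
        break;
    }
}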

◆ hash_seq_term()

void hash_seq_term ( HASH_SEQ_STATUS status)

Definition at line 1509 of file dynahash.c.

1510{
1511 if (!status->hashp->frozen)
1512 deregister_seq_scan(status->hashp);
1513}

References deregister_seq_scan(), HTAB::frozen, and HASH_SEQ_STATUS::hashp.

◆ hash_stats()

void hash_stats ( const char *  caller,
HTAB hashp 
)

Definition at line 884 of file dynahash.c.

885{
886#ifdef HASH_STATISTICS
887 HASHHDR *hctl = hashp->hctl;
888
889 elog(DEBUG4,
890 "hash_stats: Caller: %s Table Name: \"%s\" Accesses: " UINT64_FORMAT " Collisions: " UINT64_FORMAT " Expansions: " UINT64_FORMAT " Entries: " INT64_FORMAT " Key Size: %zu Max Bucket: %u Segment Count: " INT64_FORMAT,
891 caller != NULL ? caller : "(unknown)", hashp->tabname, hctl->accesses,
892 hctl->collisions, hctl->expansions, hash_get_num_entries(hashp),
893 hctl->keysize, hctl->max_bucket, hctl->nsegs);
894#endif
895}
#define INT64_FORMAT
Definition: c.h:557
#define UINT64_FORMAT
Definition: c.h:558
int64 hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1336
#define DEBUG4
Definition: elog.h:27

References DEBUG4, elog, hash_get_num_entries(), HTAB::hctl, INT64_FORMAT, HASHHDR::keysize, HASHHDR::max_bucket, HASHHDR::nsegs, HTAB::tabname, and UINT64_FORMAT.

Referenced by hash_destroy().
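
The call is cheap to leave in place, since the function body does nothing unless the server was built with HASH_STATISTICS defined. A sketch, with my_hash_table as a hypothetical HTAB:

/* Logs accesses, collisions, expansions, etc. at DEBUG4 when built with
 * HASH_STATISTICS; otherwise the body is a no-op. */
hash_stats("my_module shutdown", my_hash_table);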

◆ hash_update_hash_key()

bool hash_update_hash_key ( HTAB hashp,
void *  existingEntry,
const void *  newKeyPtr 
)

Definition at line 1140 of file dynahash.c.

1143{
1144 HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry);
1145 uint32 newhashvalue;
1146 Size keysize;
1147 uint32 bucket;
1148 uint32 newbucket;
1149 HASHBUCKET currBucket;
1150 HASHBUCKET *prevBucketPtr;
1151 HASHBUCKET *oldPrevPtr;
1152 HashCompareFunc match;
1153
1154#ifdef HASH_STATISTICS
1155 HASHHDR *hctl = hashp->hctl;
1156
1157 hctl->accesses++;
1158#endif
1159
1160 /* disallow updates if frozen */
1161 if (hashp->frozen)
1162 elog(ERROR, "cannot update in frozen hashtable \"%s\"",
1163 hashp->tabname);
1164
1165 /*
1166 * Lookup the existing element using its saved hash value. We need to do
1167 * this to be able to unlink it from its hash chain, but as a side benefit
1168 * we can verify the validity of the passed existingEntry pointer.
1169 */
1170 bucket = hash_initial_lookup(hashp, existingElement->hashvalue,
1171 &prevBucketPtr);
1172 currBucket = *prevBucketPtr;
1173
1174 while (currBucket != NULL)
1175 {
1176 if (currBucket == existingElement)
1177 break;
1178 prevBucketPtr = &(currBucket->link);
1179 currBucket = *prevBucketPtr;
1180 }
1181
1182 if (currBucket == NULL)
1183 elog(ERROR, "hash_update_hash_key argument is not in hashtable \"%s\"",
1184 hashp->tabname);
1185
1186 oldPrevPtr = prevBucketPtr;
1187
1188 /*
1189 * Now perform the equivalent of a HASH_ENTER operation to locate the hash
1190 * chain we want to put the entry into.
1191 */
1192 newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);
1193 newbucket = hash_initial_lookup(hashp, newhashvalue, &prevBucketPtr);
1194 currBucket = *prevBucketPtr;
1195
1196 /*
1197 * Follow collision chain looking for matching key
1198 */
1199 match = hashp->match; /* save one fetch in inner loop */
1200 keysize = hashp->keysize; /* ditto */
1201
1202 while (currBucket != NULL)
1203 {
1204 if (currBucket->hashvalue == newhashvalue &&
1205 match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0)
1206 break;
1207 prevBucketPtr = &(currBucket->link);
1208 currBucket = *prevBucketPtr;
1209#ifdef HASH_STATISTICS
1210 hctl->collisions++;
1211#endif
1212 }
1213
1214 if (currBucket != NULL)
1215 return false; /* collision with an existing entry */
1216
1217 currBucket = existingElement;
1218
1219 /*
1220 * If old and new hash values belong to the same bucket, we need not
1221 * change any chain links, and indeed should not since this simplistic
1222 * update will corrupt the list if currBucket is the last element. (We
1223 * cannot fall out earlier, however, since we need to scan the bucket to
1224 * check for duplicate keys.)
1225 */
1226 if (bucket != newbucket)
1227 {
1228 /* OK to remove record from old hash bucket's chain. */
1229 *oldPrevPtr = currBucket->link;
1230
1231 /* link into new hashbucket chain */
1232 *prevBucketPtr = currBucket;
1233 currBucket->link = NULL;
1234 }
1235
1236 /* copy new key into record */
1237 currBucket->hashvalue = newhashvalue;
1238 hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize);
1239
1240 /* rest of record is untouched */
1241
1242 return true;
1243}
#define ELEMENT_FROM_KEY(key)
Definition: dynahash.c:260

References ELEMENT_FROM_KEY, ELEMENTKEY, elog, ERROR, HTAB::frozen, HTAB::hash, hash_initial_lookup(), HASHELEMENT::hashvalue, HTAB::hctl, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, and HTAB::tabname.

Referenced by PostPrepare_Locks().
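
A sketch of re-keying an entry in place (hypothetical Oid keys and the my_hash_table placeholder from the earlier examples); the entry pointer must be one previously returned by hash_search() on the same table, and on a key collision the entry keeps its old key:

Oid         oldkey = 1234;          /* hypothetical key values */
Oid         newkey = 5678;
MyEntry    *entry;
bool        found;

entry = (MyEntry *) hash_search(my_hash_table, &oldkey, HASH_FIND, &found);
if (found && !hash_update_hash_key(my_hash_table, entry, &newkey))
    elog(ERROR, "new key already present in hash table");
/* on success the entry now hangs on the bucket chain for newkey,
 * with all non-key fields untouched */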

◆ hdefault()

static void hdefault ( HTAB hashp)
static

Definition at line 638 of file dynahash.c.

639{
640 HASHHDR *hctl = hashp->hctl;
641
642 MemSet(hctl, 0, sizeof(HASHHDR));
643
644 hctl->dsize = DEF_DIRSIZE;
645 hctl->nsegs = 0;
646
647 hctl->num_partitions = 0; /* not partitioned */
648
649 /* table has no fixed maximum size */
650 hctl->max_dsize = NO_MAX_DSIZE;
651
652 hctl->ssize = DEF_SEGSIZE;
 653 hctl->sshift = DEF_SEGSIZE_SHIFT;
 654
655 hctl->isfixed = false; /* can be enlarged */
656
657#ifdef HASH_STATISTICS
658 hctl->accesses = hctl->collisions = hctl->expansions = 0;
659#endif
660}
#define DEF_SEGSIZE_SHIFT
Definition: dynahash.c:124

References DEF_DIRSIZE, DEF_SEGSIZE, DEF_SEGSIZE_SHIFT, HASHHDR::dsize, HTAB::hctl, HASHHDR::isfixed, HASHHDR::max_dsize, MemSet, NO_MAX_DSIZE, HASHHDR::nsegs, HASHHDR::num_partitions, HASHHDR::sshift, and HASHHDR::ssize.

Referenced by hash_create().

◆ init_htab()

static bool init_htab ( HTAB hashp,
int64  nelem 
)
static

Definition at line 700 of file dynahash.c.

701{
702 HASHHDR *hctl = hashp->hctl;
703 HASHSEGMENT *segp;
704 int nbuckets;
705 int nsegs;
706 int i;
707
708 /*
709 * initialize mutexes if it's a partitioned table
710 */
711 if (IS_PARTITIONED(hctl))
712 for (i = 0; i < NUM_FREELISTS; i++)
713 SpinLockInit(&(hctl->freeList[i].mutex));
714
715 /*
716 * Allocate space for the next greater power of two number of buckets,
717 * assuming a desired maximum load factor of 1.
718 */
719 nbuckets = next_pow2_int(nelem);
720
721 /*
722 * In a partitioned table, nbuckets must be at least equal to
723 * num_partitions; were it less, keys with apparently different partition
724 * numbers would map to the same bucket, breaking partition independence.
725 * (Normally nbuckets will be much bigger; this is just a safety check.)
726 */
727 while (nbuckets < hctl->num_partitions)
728 nbuckets <<= 1;
729
730 hctl->max_bucket = hctl->low_mask = nbuckets - 1;
731 hctl->high_mask = (nbuckets << 1) - 1;
732
733 /*
734 * Figure number of directory segments needed, round up to a power of 2
735 */
736 nsegs = (nbuckets - 1) / hctl->ssize + 1;
737 nsegs = next_pow2_int(nsegs);
738
739 /*
740 * Make sure directory is big enough. If pre-allocated directory is too
741 * small, choke (caller screwed up).
742 */
743 if (nsegs > hctl->dsize)
744 {
745 if (!(hashp->dir))
746 hctl->dsize = nsegs;
747 else
748 return false;
749 }
750
751 /* Allocate a directory */
752 if (!(hashp->dir))
753 {
754 CurrentDynaHashCxt = hashp->hcxt;
755 hashp->dir = (HASHSEGMENT *)
756 hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
757 if (!hashp->dir)
758 return false;
759 }
760
761 /* Allocate initial segments */
762 for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
763 {
764 *segp = seg_alloc(hashp);
765 if (*segp == NULL)
766 return false;
767 }
768
769 /* Choose number of entries to allocate at a time */
 770 hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);
 771
772 return true;
773}
#define SpinLockInit(lock)
Definition: spin.h:57

References HTAB::alloc, choose_nelem_alloc(), CurrentDynaHashCxt, HTAB::dir, HASHHDR::dsize, HASHHDR::entrysize, HASHHDR::freeList, HTAB::hctl, HTAB::hcxt, HASHHDR::high_mask, i, IS_PARTITIONED, HASHHDR::low_mask, HASHHDR::max_bucket, FreeListData::mutex, HASHHDR::nelem_alloc, next_pow2_int(), HASHHDR::nsegs, NUM_FREELISTS, seg_alloc(), SpinLockInit, and HASHHDR::ssize.

Referenced by hash_create().
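
As a worked example of the sizing rules above (illustrative, assuming the default ssize of 256 and an unpartitioned table):

/*
 *   nelem      = 500
 *   nbuckets   = next_pow2_int(500)                 = 512
 *   max_bucket = low_mask = 511, high_mask          = 1023
 *   nsegs      = next_pow2_int((512 - 1)/256 + 1)   = 2
 *
 * so two directory segments are allocated up front, comfortably within
 * the default directory size of 256 entries.
 */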

◆ my_log2()

static int my_log2 ( int64  num)
static

Definition at line 1817 of file dynahash.c.

1818{
1819 /*
1820 * guard against too-large input, which would be invalid for
1821 * pg_ceil_log2_*()
1822 */
1823 if (num > PG_INT64_MAX / 2)
1824 num = PG_INT64_MAX / 2;
1825
1826 return pg_ceil_log2_64(num);
1827}
#define PG_INT64_MAX
Definition: c.h:598
static uint64 pg_ceil_log2_64(uint64 num)
Definition: pg_bitutils.h:271

References pg_ceil_log2_64(), and PG_INT64_MAX.

Referenced by hash_create(), next_pow2_int(), and next_pow2_int64().

◆ next_pow2_int()

static int next_pow2_int ( int64  num)
static

Definition at line 1839 of file dynahash.c.

1840{
1841 if (num > INT_MAX / 2)
1842 num = INT_MAX / 2;
1843 return 1 << my_log2(num);
1844}

References my_log2().

Referenced by hash_create(), and init_htab().

◆ next_pow2_int64()

static int64 next_pow2_int64 ( int64  num)
static

Definition at line 1831 of file dynahash.c.

1832{
1833 /* my_log2's internal range check is sufficient */
1834 return 1L << my_log2(num);
1835}

References my_log2().

Referenced by hash_estimate_size(), and hash_select_dirsize().
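
Some illustrative values for the rounding helpers above (they are static to dynahash.c, so these are shown only to make the behavior concrete):

/*
 *   my_log2(1000)               = 10           (ceiling log2)
 *   next_pow2_int(1000)         = 1024
 *   next_pow2_int(1024)         = 1024         (exact powers map to themselves)
 *   next_pow2_int64(5000000000) = 8589934592   (2^33)
 */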

◆ register_seq_scan()

static void register_seq_scan ( HTAB hashp)
static

Definition at line 1884 of file dynahash.c.

1885{
1886 if (num_seq_scans >= MAX_SEQ_SCANS)
1887 elog(ERROR, "too many active hash_seq_search scans, cannot start one on \"%s\"",
1888 hashp->tabname);
1889 seq_scan_tables[num_seq_scans] = hashp;
1890 seq_scan_level[num_seq_scans] = GetCurrentTransactionNestLevel();
1891 num_seq_scans++;
1892}
#define MAX_SEQ_SCANS
Definition: dynahash.c:1875
int GetCurrentTransactionNestLevel(void)
Definition: xact.c:929

References elog, ERROR, GetCurrentTransactionNestLevel(), MAX_SEQ_SCANS, num_seq_scans, seq_scan_level, seq_scan_tables, and HTAB::tabname.

Referenced by hash_seq_init().

◆ seg_alloc()

static HASHSEGMENT seg_alloc ( HTAB hashp)
static

Definition at line 1682 of file dynahash.c.

1683{
1684 HASHSEGMENT segp;
1685
1686 CurrentDynaHashCxt = hashp->hcxt;
1687 segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->ssize);
1688
1689 if (!segp)
1690 return NULL;
1691
1692 MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->ssize);
1693
1694 return segp;
1695}

References HTAB::alloc, CurrentDynaHashCxt, HTAB::hcxt, MemSet, and HTAB::ssize.

Referenced by expand_table(), and init_htab().

◆ string_compare()

static int string_compare ( const char *  key1,
const char *  key2,
Size  keysize 
)
static

Definition at line 313 of file dynahash.c.

314{
315 return strncmp(key1, key2, keysize - 1);
316}

Referenced by hash_create().
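
string_compare() is the default match function for tables created with HASH_STRINGS. Comparing only keysize - 1 bytes is sufficient because such keys are copied with strlcpy() and are therefore NUL-terminated within keysize. An illustrative check (the function is static to dynahash.c, so this is a sketch rather than callable client code):

char    key1[NAMEDATALEN] = "pg_class";
char    key2[NAMEDATALEN] = "pg_class";

Assert(string_compare(key1, key2, NAMEDATALEN) == 0);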

Variable Documentation

◆ CurrentDynaHashCxt

MemoryContext CurrentDynaHashCxt = NULL
static

Definition at line 294 of file dynahash.c.

Referenced by dir_realloc(), DynaHashAlloc(), element_alloc(), hash_create(), init_htab(), and seg_alloc().

◆ num_seq_scans

int num_seq_scans = 0
static

Definition at line 1879 of file dynahash.c.

Referenced by AtEOSubXact_HashTables(), AtEOXact_HashTables(), deregister_seq_scan(), has_seq_scans(), and register_seq_scan().

◆ seq_scan_level

int seq_scan_level[MAX_SEQ_SCANS]
static

Definition at line 1878 of file dynahash.c.

Referenced by AtEOSubXact_HashTables(), deregister_seq_scan(), and register_seq_scan().

◆ seq_scan_tables