typcache.c — PostgreSQL source code (git master), src/backend/utils/cache/typcache.c.
Extracted from the HTML rendering at doxygen.postgresql.org; some original source
lines were lost in extraction.
1/*-------------------------------------------------------------------------
2 *
3 * typcache.c
4 * POSTGRES type cache code
5 *
6 * The type cache exists to speed lookup of certain information about data
7 * types that is not directly available from a type's pg_type row. For
8 * example, we use a type's default btree opclass, or the default hash
9 * opclass if no btree opclass exists, to determine which operators should
10 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 *
12 * Several seemingly-odd choices have been made to support use of the type
13 * cache by generic array and record handling routines, such as array_eq(),
14 * record_cmp(), and hash_array(). Because those routines are used as index
15 * support operations, they cannot leak memory. To allow them to execute
16 * efficiently, all information that they would like to re-use across calls
17 * is kept in the type cache.
18 *
19 * Once created, a type cache entry lives as long as the backend does, so
20 * there is no need for a call to release a cache entry. If the type is
21 * dropped, the cache entry simply becomes wasted storage. This is not
22 * expected to happen often, and assuming that typcache entries are good
23 * permanently allows caching pointers to them in long-lived places.
24 *
25 * We have some provisions for updating cache entries if the stored data
26 * becomes obsolete. Core data extracted from the pg_type row is updated
27 * when we detect updates to pg_type. Information dependent on opclasses is
28 * cleared if we detect updates to pg_opclass. We also support clearing the
29 * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 * since those may need to change as a consequence of ALTER TABLE. Domain
31 * constraint changes are also tracked properly.
32 *
33 *
34 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
35 * Portions Copyright (c) 1994, Regents of the University of California
36 *
37 * IDENTIFICATION
38 * src/backend/utils/cache/typcache.c
39 *
40 *-------------------------------------------------------------------------
41 */
42#include "postgres.h"
43
44#include <limits.h>
45
46#include "access/hash.h"
47#include "access/htup_details.h"
48#include "access/nbtree.h"
49#include "access/parallel.h"
50#include "access/relation.h"
51#include "access/session.h"
52#include "access/table.h"
53#include "catalog/pg_am.h"
55#include "catalog/pg_enum.h"
56#include "catalog/pg_operator.h"
57#include "catalog/pg_range.h"
58#include "catalog/pg_type.h"
59#include "commands/defrem.h"
60#include "common/int.h"
61#include "executor/executor.h"
62#include "lib/dshash.h"
63#include "optimizer/optimizer.h"
64#include "port/pg_bitutils.h"
65#include "storage/lwlock.h"
66#include "utils/builtins.h"
67#include "utils/catcache.h"
68#include "utils/fmgroids.h"
70#include "utils/inval.h"
71#include "utils/lsyscache.h"
72#include "utils/memutils.h"
73#include "utils/rel.h"
74#include "utils/syscache.h"
75#include "utils/typcache.h"
76
77
78/* The main type cache hashtable searched by lookup_type_cache */
79static HTAB *TypeCacheHash = NULL;
80
81/*
82 * The mapping of relation's OID to the corresponding composite type OID.
83 * We're keeping the map entry when the corresponding typentry has something
84 * to clear i.e it has either TCFLAGS_HAVE_PG_TYPE_DATA, or
85 * TCFLAGS_OPERATOR_FLAGS, or tupdesc.
86 */
88
90{
91 Oid relid; /* OID of the relation */
92 Oid composite_typid; /* OID of the relation's composite type */
94
95/* List of type cache entries for domain types */
97
98/* Private flag bits in the TypeCacheEntry.flags field */
99#define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
100#define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
101#define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
102#define TCFLAGS_CHECKED_EQ_OPR 0x000008
103#define TCFLAGS_CHECKED_LT_OPR 0x000010
104#define TCFLAGS_CHECKED_GT_OPR 0x000020
105#define TCFLAGS_CHECKED_CMP_PROC 0x000040
106#define TCFLAGS_CHECKED_HASH_PROC 0x000080
107#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
108#define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
109#define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
110#define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
111#define TCFLAGS_HAVE_ELEM_HASHING 0x001000
112#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
113#define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
114#define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
115#define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
116#define TCFLAGS_HAVE_FIELD_HASHING 0x020000
117#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
118#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
119#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
120
121/* The flags associated with equality/comparison/hashing are all but these: */
122#define TCFLAGS_OPERATOR_FLAGS \
123 (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
124 TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
125 TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
126
127/*
128 * Data stored about a domain type's constraints. Note that we do not create
129 * this struct for the common case of a constraint-less domain; we just set
130 * domainData to NULL to indicate that.
131 *
132 * Within a DomainConstraintCache, we store expression plan trees, but the
133 * check_exprstate fields of the DomainConstraintState nodes are just NULL.
134 * When needed, expression evaluation nodes are built by flat-copying the
135 * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
136 * Such a node tree is not part of the DomainConstraintCache, but is
137 * considered to belong to a DomainConstraintRef.
138 */
140{
141 List *constraints; /* list of DomainConstraintState nodes */
142 MemoryContext dccContext; /* memory context holding all associated data */
143 long dccRefCount; /* number of references to this struct */
144};
145
146/* Private information to support comparisons of enum values */
147typedef struct
148{
149 Oid enum_oid; /* OID of one enum value */
150 float4 sort_order; /* its sort position */
151} EnumItem;
152
153typedef struct TypeCacheEnumData
154{
155 Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
156 Bitmapset *sorted_values; /* Set of OIDs known to be in order */
157 int num_values; /* total number of values in enum */
160
161/*
162 * We use a separate table for storing the definitions of non-anonymous
163 * record types. Once defined, a record type will be remembered for the
164 * life of the backend. Subsequent uses of the "same" record type (where
165 * sameness means equalRowTypes) will refer to the existing table entry.
166 *
167 * Stored record types are remembered in a linear array of TupleDescs,
168 * which can be indexed quickly with the assigned typmod. There is also
169 * a hash table to speed searches for matching TupleDescs.
170 */
171
172typedef struct RecordCacheEntry
173{
176
177/*
178 * To deal with non-anonymous record types that are exchanged by backends
179 * involved in a parallel query, we also need a shared version of the above.
180 */
182{
183 /* A hash table for finding a matching TupleDesc. */
185 /* A hash table for finding a TupleDesc by typmod. */
187 /* A source of new record typmod numbers. */
189};
190
191/*
192 * When using shared tuple descriptors as hash table keys we need a way to be
193 * able to search for an equal shared TupleDesc using a backend-local
194 * TupleDesc. So we use this type which can hold either, and hash and compare
195 * functions that know how to handle both.
196 */
198{
199 union
200 {
203 } u;
204 bool shared;
206
207/*
208 * The shared version of RecordCacheEntry. This lets us look up a typmod
209 * using a TupleDesc which may be in local or shared memory.
210 */
212{
215
216/*
217 * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
218 * up a TupleDesc in shared memory using a typmod.
219 */
221{
225
229
230/*
231 * A comparator function for SharedRecordTableKey.
232 */
233static int
234shared_record_table_compare(const void *a, const void *b, size_t size,
235 void *arg)
236{
237 dsa_area *area = (dsa_area *) arg;
240 TupleDesc t1;
241 TupleDesc t2;
242
243 if (k1->shared)
244 t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 else
246 t1 = k1->u.local_tupdesc;
247
248 if (k2->shared)
249 t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 else
251 t2 = k2->u.local_tupdesc;
252
253 return equalRowTypes(t1, t2) ? 0 : 1;
254}
255
256/*
257 * A hash function for SharedRecordTableKey.
258 */
259static uint32
260shared_record_table_hash(const void *a, size_t size, void *arg)
261{
262 dsa_area *area = (dsa_area *) arg;
264 TupleDesc t;
265
266 if (k->shared)
268 else
269 t = k->u.local_tupdesc;
270
271 return hashRowType(t);
272}
273
274/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
276 sizeof(SharedRecordTableKey), /* unused */
281 LWTRANCHE_PER_SESSION_RECORD_TYPE
282};
283
284/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
286 sizeof(uint32),
291 LWTRANCHE_PER_SESSION_RECORD_TYPMOD
292};
293
294/* hashtable for recognizing registered record types */
295static HTAB *RecordCacheHash = NULL;
296
298{
302
303/* array of info about registered record types, indexed by assigned typmod */
305static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
306static int32 NextRecordTypmod = 0; /* number of entries used */
307
308/*
309 * Process-wide counter for generating unique tupledesc identifiers.
310 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
311 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
312 */
314
315static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316static void load_rangetype_info(TypeCacheEntry *typentry);
317static void load_multirangetype_info(TypeCacheEntry *typentry);
318static void load_domaintype_info(TypeCacheEntry *typentry);
319static int dcs_cmp(const void *a, const void *b);
321static void dccref_deletion_callback(void *arg);
322static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
323static bool array_element_has_equality(TypeCacheEntry *typentry);
324static bool array_element_has_compare(TypeCacheEntry *typentry);
325static bool array_element_has_hashing(TypeCacheEntry *typentry);
328static bool record_fields_have_equality(TypeCacheEntry *typentry);
329static bool record_fields_have_compare(TypeCacheEntry *typentry);
330static bool record_fields_have_hashing(TypeCacheEntry *typentry);
332static void cache_record_field_properties(TypeCacheEntry *typentry);
333static bool range_element_has_hashing(TypeCacheEntry *typentry);
339static void TypeCacheRelCallback(Datum arg, Oid relid);
340static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
341static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
342static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
343static void load_enum_cache_data(TypeCacheEntry *tcache);
345static int enum_oid_cmp(const void *left, const void *right);
347 Datum datum);
349static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
350 uint32 typmod);
353
354
355/*
356 * Hash function compatible with one-arg system cache hash function.
357 */
358static uint32
359type_cache_syshash(const void *key, Size keysize)
360{
361 Assert(keysize == sizeof(Oid));
362 return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
363}
364
365/*
366 * lookup_type_cache
367 *
368 * Fetch the type cache entry for the specified datatype, and make sure that
369 * all the fields requested by bits in 'flags' are valid.
370 *
371 * The result is never NULL --- we will ereport() if the passed type OID is
372 * invalid. Note however that we may fail to find one or more of the
373 * values requested by 'flags'; the caller needs to check whether the fields
374 * are InvalidOid or not.
375 *
376 * Note that while filling TypeCacheEntry we might process concurrent
377 * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
378 * invalidated. In this case, we typically only clear flags while values are
379 * still available for the caller. It's expected that the caller holds
380 * enough locks on type-depending objects that the values are still relevant.
381 * It's also important that the tupdesc is filled after all other
382 * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
383 * invalidated during the lookup_type_cache() call.
384 */
386lookup_type_cache(Oid type_id, int flags)
387{
388 TypeCacheEntry *typentry;
389 bool found;
390 int in_progress_offset;
391
392 if (TypeCacheHash == NULL)
393 {
394 /* First time through: initialize the hash table */
395 HASHCTL ctl;
396 int allocsize;
397
398 ctl.keysize = sizeof(Oid);
399 ctl.entrysize = sizeof(TypeCacheEntry);
400
401 /*
402 * TypeCacheEntry takes hash value from the system cache. For
403 * TypeCacheHash we use the same hash in order to speedup search by
404 * hash value. This is used by hash_seq_init_with_hash_value().
405 */
406 ctl.hash = type_cache_syshash;
407
408 TypeCacheHash = hash_create("Type information cache", 64,
410
412
413 ctl.keysize = sizeof(Oid);
414 ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
415 RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
417
418 /* Also set up callbacks for SI invalidations */
423
424 /* Also make sure CacheMemoryContext exists */
427
428 /*
429 * reserve enough in_progress_list slots for many cases
430 */
431 allocsize = 4;
434 allocsize * sizeof(*in_progress_list));
435 in_progress_list_maxlen = allocsize;
436 }
437
438 Assert(TypeCacheHash != NULL && RelIdToTypeIdCacheHash != NULL);
439
440 /* Register to catch invalidation messages */
442 {
443 int allocsize;
444
445 allocsize = in_progress_list_maxlen * 2;
447 allocsize * sizeof(*in_progress_list));
448 in_progress_list_maxlen = allocsize;
449 }
450 in_progress_offset = in_progress_list_len++;
451 in_progress_list[in_progress_offset] = type_id;
452
453 /* Try to look up an existing entry */
455 &type_id,
456 HASH_FIND, NULL);
457 if (typentry == NULL)
458 {
459 /*
460 * If we didn't find one, we want to make one. But first look up the
461 * pg_type row, just to make sure we don't make a cache entry for an
462 * invalid type OID. If the type OID is not valid, present a
463 * user-facing error, since some code paths such as domain_in() allow
464 * this function to be reached with a user-supplied OID.
465 */
466 HeapTuple tp;
467 Form_pg_type typtup;
468
469 tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
470 if (!HeapTupleIsValid(tp))
472 (errcode(ERRCODE_UNDEFINED_OBJECT),
473 errmsg("type with OID %u does not exist", type_id)));
474 typtup = (Form_pg_type) GETSTRUCT(tp);
475 if (!typtup->typisdefined)
477 (errcode(ERRCODE_UNDEFINED_OBJECT),
478 errmsg("type \"%s\" is only a shell",
479 NameStr(typtup->typname))));
480
481 /* Now make the typcache entry */
483 &type_id,
484 HASH_ENTER, &found);
485 Assert(!found); /* it wasn't there a moment ago */
486
487 MemSet(typentry, 0, sizeof(TypeCacheEntry));
488
489 /* These fields can never change, by definition */
490 typentry->type_id = type_id;
491 typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
492
493 /* Keep this part in sync with the code below */
494 typentry->typlen = typtup->typlen;
495 typentry->typbyval = typtup->typbyval;
496 typentry->typalign = typtup->typalign;
497 typentry->typstorage = typtup->typstorage;
498 typentry->typtype = typtup->typtype;
499 typentry->typrelid = typtup->typrelid;
500 typentry->typsubscript = typtup->typsubscript;
501 typentry->typelem = typtup->typelem;
502 typentry->typarray = typtup->typarray;
503 typentry->typcollation = typtup->typcollation;
504 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
505
506 /* If it's a domain, immediately thread it into the domain cache list */
507 if (typentry->typtype == TYPTYPE_DOMAIN)
508 {
510 firstDomainTypeEntry = typentry;
511 }
512
513 ReleaseSysCache(tp);
514 }
515 else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
516 {
517 /*
518 * We have an entry, but its pg_type row got changed, so reload the
519 * data obtained directly from pg_type.
520 */
521 HeapTuple tp;
522 Form_pg_type typtup;
523
524 tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
525 if (!HeapTupleIsValid(tp))
527 (errcode(ERRCODE_UNDEFINED_OBJECT),
528 errmsg("type with OID %u does not exist", type_id)));
529 typtup = (Form_pg_type) GETSTRUCT(tp);
530 if (!typtup->typisdefined)
532 (errcode(ERRCODE_UNDEFINED_OBJECT),
533 errmsg("type \"%s\" is only a shell",
534 NameStr(typtup->typname))));
535
536 /*
537 * Keep this part in sync with the code above. Many of these fields
538 * shouldn't ever change, particularly typtype, but copy 'em anyway.
539 */
540 typentry->typlen = typtup->typlen;
541 typentry->typbyval = typtup->typbyval;
542 typentry->typalign = typtup->typalign;
543 typentry->typstorage = typtup->typstorage;
544 typentry->typtype = typtup->typtype;
545 typentry->typrelid = typtup->typrelid;
546 typentry->typsubscript = typtup->typsubscript;
547 typentry->typelem = typtup->typelem;
548 typentry->typarray = typtup->typarray;
549 typentry->typcollation = typtup->typcollation;
550 typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
551
552 ReleaseSysCache(tp);
553 }
554
555 /*
556 * Look up opclasses if we haven't already and any dependent info is
557 * requested.
558 */
564 {
565 Oid opclass;
566
567 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
568 if (OidIsValid(opclass))
569 {
570 typentry->btree_opf = get_opclass_family(opclass);
571 typentry->btree_opintype = get_opclass_input_type(opclass);
572 }
573 else
574 {
575 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
576 }
577
578 /*
579 * Reset information derived from btree opclass. Note in particular
580 * that we'll redetermine the eq_opr even if we previously found one;
581 * this matters in case a btree opclass has been added to a type that
582 * previously had only a hash opclass.
583 */
584 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
589 }
590
591 /*
592 * If we need to look up equality operator, and there's no btree opclass,
593 * force lookup of hash opclass.
594 */
595 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
596 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
597 typentry->btree_opf == InvalidOid)
599
604 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
605 {
606 Oid opclass;
607
608 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
609 if (OidIsValid(opclass))
610 {
611 typentry->hash_opf = get_opclass_family(opclass);
612 typentry->hash_opintype = get_opclass_input_type(opclass);
613 }
614 else
615 {
616 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
617 }
618
619 /*
620 * Reset information derived from hash opclass. We do *not* reset the
621 * eq_opr; if we already found one from the btree opclass, that
622 * decision is still good.
623 */
624 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
627 }
628
629 /*
630 * Look for requested operators and functions, if we haven't already.
631 */
632 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
633 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
634 {
635 Oid eq_opr = InvalidOid;
636
637 if (typentry->btree_opf != InvalidOid)
638 eq_opr = get_opfamily_member(typentry->btree_opf,
639 typentry->btree_opintype,
640 typentry->btree_opintype,
642 if (eq_opr == InvalidOid &&
643 typentry->hash_opf != InvalidOid)
644 eq_opr = get_opfamily_member(typentry->hash_opf,
645 typentry->hash_opintype,
646 typentry->hash_opintype,
648
649 /*
650 * If the proposed equality operator is array_eq or record_eq, check
651 * to see if the element type or column types support equality. If
652 * not, array_eq or record_eq would fail at runtime, so we don't want
653 * to report that the type has equality. (We can omit similar
654 * checking for ranges and multiranges because ranges can't be created
655 * in the first place unless their subtypes support equality.)
656 */
657 if (eq_opr == ARRAY_EQ_OP &&
659 eq_opr = InvalidOid;
660 else if (eq_opr == RECORD_EQ_OP &&
662 eq_opr = InvalidOid;
663
664 /* Force update of eq_opr_finfo only if we're changing state */
665 if (typentry->eq_opr != eq_opr)
666 typentry->eq_opr_finfo.fn_oid = InvalidOid;
667
668 typentry->eq_opr = eq_opr;
669
670 /*
671 * Reset info about hash functions whenever we pick up new info about
672 * equality operator. This is so we can ensure that the hash
673 * functions match the operator.
674 */
675 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
677 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
678 }
679 if ((flags & TYPECACHE_LT_OPR) &&
680 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
681 {
682 Oid lt_opr = InvalidOid;
683
684 if (typentry->btree_opf != InvalidOid)
685 lt_opr = get_opfamily_member(typentry->btree_opf,
686 typentry->btree_opintype,
687 typentry->btree_opintype,
689
690 /*
691 * As above, make sure array_cmp or record_cmp will succeed; but again
692 * we need no special check for ranges or multiranges.
693 */
694 if (lt_opr == ARRAY_LT_OP &&
695 !array_element_has_compare(typentry))
696 lt_opr = InvalidOid;
697 else if (lt_opr == RECORD_LT_OP &&
699 lt_opr = InvalidOid;
700
701 typentry->lt_opr = lt_opr;
702 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
703 }
704 if ((flags & TYPECACHE_GT_OPR) &&
705 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
706 {
707 Oid gt_opr = InvalidOid;
708
709 if (typentry->btree_opf != InvalidOid)
710 gt_opr = get_opfamily_member(typentry->btree_opf,
711 typentry->btree_opintype,
712 typentry->btree_opintype,
714
715 /*
716 * As above, make sure array_cmp or record_cmp will succeed; but again
717 * we need no special check for ranges or multiranges.
718 */
719 if (gt_opr == ARRAY_GT_OP &&
720 !array_element_has_compare(typentry))
721 gt_opr = InvalidOid;
722 else if (gt_opr == RECORD_GT_OP &&
724 gt_opr = InvalidOid;
725
726 typentry->gt_opr = gt_opr;
727 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
728 }
730 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
731 {
732 Oid cmp_proc = InvalidOid;
733
734 if (typentry->btree_opf != InvalidOid)
735 cmp_proc = get_opfamily_proc(typentry->btree_opf,
736 typentry->btree_opintype,
737 typentry->btree_opintype,
739
740 /*
741 * As above, make sure array_cmp or record_cmp will succeed; but again
742 * we need no special check for ranges or multiranges.
743 */
744 if (cmp_proc == F_BTARRAYCMP &&
745 !array_element_has_compare(typentry))
746 cmp_proc = InvalidOid;
747 else if (cmp_proc == F_BTRECORDCMP &&
749 cmp_proc = InvalidOid;
750
751 /* Force update of cmp_proc_finfo only if we're changing state */
752 if (typentry->cmp_proc != cmp_proc)
753 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
754
755 typentry->cmp_proc = cmp_proc;
756 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
757 }
759 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
760 {
761 Oid hash_proc = InvalidOid;
762
763 /*
764 * We insist that the eq_opr, if one has been determined, match the
765 * hash opclass; else report there is no hash function.
766 */
767 if (typentry->hash_opf != InvalidOid &&
768 (!OidIsValid(typentry->eq_opr) ||
769 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
770 typentry->hash_opintype,
771 typentry->hash_opintype,
773 hash_proc = get_opfamily_proc(typentry->hash_opf,
774 typentry->hash_opintype,
775 typentry->hash_opintype,
777
778 /*
779 * As above, make sure hash_array, hash_record, or hash_range will
780 * succeed.
781 */
782 if (hash_proc == F_HASH_ARRAY &&
783 !array_element_has_hashing(typentry))
784 hash_proc = InvalidOid;
785 else if (hash_proc == F_HASH_RECORD &&
787 hash_proc = InvalidOid;
788 else if (hash_proc == F_HASH_RANGE &&
789 !range_element_has_hashing(typentry))
790 hash_proc = InvalidOid;
791
792 /*
793 * Likewise for hash_multirange.
794 */
795 if (hash_proc == F_HASH_MULTIRANGE &&
797 hash_proc = InvalidOid;
798
799 /* Force update of hash_proc_finfo only if we're changing state */
800 if (typentry->hash_proc != hash_proc)
802
803 typentry->hash_proc = hash_proc;
804 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
805 }
806 if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
809 {
810 Oid hash_extended_proc = InvalidOid;
811
812 /*
813 * We insist that the eq_opr, if one has been determined, match the
814 * hash opclass; else report there is no hash function.
815 */
816 if (typentry->hash_opf != InvalidOid &&
817 (!OidIsValid(typentry->eq_opr) ||
818 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
819 typentry->hash_opintype,
820 typentry->hash_opintype,
822 hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
823 typentry->hash_opintype,
824 typentry->hash_opintype,
826
827 /*
828 * As above, make sure hash_array_extended, hash_record_extended, or
829 * hash_range_extended will succeed.
830 */
831 if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
833 hash_extended_proc = InvalidOid;
834 else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
836 hash_extended_proc = InvalidOid;
837 else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
839 hash_extended_proc = InvalidOid;
840
841 /*
842 * Likewise for hash_multirange_extended.
843 */
844 if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
846 hash_extended_proc = InvalidOid;
847
848 /* Force update of proc finfo only if we're changing state */
849 if (typentry->hash_extended_proc != hash_extended_proc)
851
852 typentry->hash_extended_proc = hash_extended_proc;
854 }
855
856 /*
857 * Set up fmgr lookup info as requested
858 *
859 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
860 * which is not quite right (they're really in the hash table's private
861 * memory context) but this will do for our purposes.
862 *
863 * Note: the code above avoids invalidating the finfo structs unless the
864 * referenced operator/function OID actually changes. This is to prevent
865 * unnecessary leakage of any subsidiary data attached to an finfo, since
866 * that would cause session-lifespan memory leaks.
867 */
868 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
869 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
870 typentry->eq_opr != InvalidOid)
871 {
872 Oid eq_opr_func;
873
874 eq_opr_func = get_opcode(typentry->eq_opr);
875 if (eq_opr_func != InvalidOid)
876 fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
878 }
879 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
880 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
881 typentry->cmp_proc != InvalidOid)
882 {
883 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
885 }
886 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
887 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
888 typentry->hash_proc != InvalidOid)
889 {
890 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
892 }
895 typentry->hash_extended_proc != InvalidOid)
896 {
898 &typentry->hash_extended_proc_finfo,
900 }
901
902 /*
903 * If it's a composite type (row type), get tupdesc if requested
904 */
905 if ((flags & TYPECACHE_TUPDESC) &&
906 typentry->tupDesc == NULL &&
907 typentry->typtype == TYPTYPE_COMPOSITE)
908 {
909 load_typcache_tupdesc(typentry);
910 }
911
912 /*
913 * If requested, get information about a range type
914 *
915 * This includes making sure that the basic info about the range element
916 * type is up-to-date.
917 */
918 if ((flags & TYPECACHE_RANGE_INFO) &&
919 typentry->typtype == TYPTYPE_RANGE)
920 {
921 if (typentry->rngelemtype == NULL)
922 load_rangetype_info(typentry);
923 else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
924 (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
925 }
926
927 /*
928 * If requested, get information about a multirange type
929 */
930 if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
931 typentry->rngtype == NULL &&
932 typentry->typtype == TYPTYPE_MULTIRANGE)
933 {
934 load_multirangetype_info(typentry);
935 }
936
937 /*
938 * If requested, get information about a domain type
939 */
940 if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
941 typentry->domainBaseType == InvalidOid &&
942 typentry->typtype == TYPTYPE_DOMAIN)
943 {
944 typentry->domainBaseTypmod = -1;
945 typentry->domainBaseType =
946 getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
947 }
948 if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
949 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
950 typentry->typtype == TYPTYPE_DOMAIN)
951 {
952 load_domaintype_info(typentry);
953 }
954
955 INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
956
957 Assert(in_progress_offset + 1 == in_progress_list_len);
959
961
962 return typentry;
963}
964
965/*
966 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
967 */
968static void
970{
971 Relation rel;
972
973 if (!OidIsValid(typentry->typrelid)) /* should not happen */
974 elog(ERROR, "invalid typrelid for composite type %u",
975 typentry->type_id);
976 rel = relation_open(typentry->typrelid, AccessShareLock);
977 Assert(rel->rd_rel->reltype == typentry->type_id);
978
979 /*
980 * Link to the tupdesc and increment its refcount (we assert it's a
981 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
982 * because the reference mustn't be entered in the current resource owner;
983 * it can outlive the current query.
984 */
985 typentry->tupDesc = RelationGetDescr(rel);
986
987 Assert(typentry->tupDesc->tdrefcount > 0);
988 typentry->tupDesc->tdrefcount++;
989
990 /*
991 * In future, we could take some pains to not change tupDesc_identifier if
992 * the tupdesc didn't really change; but for now it's not worth it.
993 */
995
997}
998
999/*
1000 * load_rangetype_info --- helper routine to set up range type information
1001 */
1002static void
1004{
1005 Form_pg_range pg_range;
1006 HeapTuple tup;
1007 Oid subtypeOid;
1008 Oid opclassOid;
1009 Oid canonicalOid;
1010 Oid subdiffOid;
1011 Oid opfamilyOid;
1012 Oid opcintype;
1013 Oid cmpFnOid;
1014
1015 /* get information from pg_range */
1016 tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
1017 /* should not fail, since we already checked typtype ... */
1018 if (!HeapTupleIsValid(tup))
1019 elog(ERROR, "cache lookup failed for range type %u",
1020 typentry->type_id);
1021 pg_range = (Form_pg_range) GETSTRUCT(tup);
1022
1023 subtypeOid = pg_range->rngsubtype;
1024 typentry->rng_collation = pg_range->rngcollation;
1025 opclassOid = pg_range->rngsubopc;
1026 canonicalOid = pg_range->rngcanonical;
1027 subdiffOid = pg_range->rngsubdiff;
1028
1029 ReleaseSysCache(tup);
1030
1031 /* get opclass properties and look up the comparison function */
1032 opfamilyOid = get_opclass_family(opclassOid);
1033 opcintype = get_opclass_input_type(opclassOid);
1034 typentry->rng_opfamily = opfamilyOid;
1035
1036 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1037 BTORDER_PROC);
1038 if (!RegProcedureIsValid(cmpFnOid))
1039 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1040 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1041
1042 /* set up cached fmgrinfo structs */
1043 fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
1045 if (OidIsValid(canonicalOid))
1046 fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
1048 if (OidIsValid(subdiffOid))
1049 fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
1051
1052 /* Lastly, set up link to the element type --- this marks data valid */
1053 typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
1054}
1055
1056/*
1057 * load_multirangetype_info --- helper routine to set up multirange type
1058 * information
1059 */
1060static void
/* NOTE(review): the signature line (expected: load_multirangetype_info(TypeCacheEntry *typentry))
 * was lost in extraction; confirm against the upstream file. */
1062{
1063 Oid rangetypeOid;
1064
 /* Resolve which range type underlies this multirange. */
1065 rangetypeOid = get_multirange_range(typentry->type_id);
1066 if (!OidIsValid(rangetypeOid))
1067 elog(ERROR, "cache lookup failed for multirange type %u",
1068 typentry->type_id);
1069
 /* Cache a link to the range type's typcache entry; requesting
  * TYPECACHE_RANGE_INFO loads its range support data too.  Setting
  * rngtype marks the multirange data valid. */
1070 typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
1071}
1072
1073/*
1074 * load_domaintype_info --- helper routine to set up domain constraint info
1075 *
1076 * Note: we assume we're called in a relatively short-lived context, so it's
1077 * okay to leak data into the current context while scanning pg_constraint.
1078 * We build the new DomainConstraintCache data in a context underneath
1079 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1080 * complete.
1081 */
1082static void
/* NOTE(review): extraction dropped several lines of this function, including the
 * signature (expected: load_domaintype_info(TypeCacheEntry *typentry)), the
 * declaration of the local "dcc" (DomainConstraintCache *), the
 * AllocSetContextCreate/MemoryContextAlloc calls that create the dcc, the
 * makeNode/initialization lines for "r" (DomainConstraintState *), the
 * Form_pg_constraint "c" declaration, the table_close() call, the
 * MemoryContextSetParent() move to CacheMemoryContext, and the final flags
 * update that sets TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS.  Reconstruct from the
 * upstream source before compiling. */
1084{
1085 Oid typeOid = typentry->type_id;
1087 bool notNull = false;
1088 DomainConstraintState **ccons;
1089 int cconslen;
1090 Relation conRel;
1091 MemoryContext oldcxt;
1092
1093 /*
1094 * If we're here, any existing constraint info is stale, so release it.
1095 * For safety, be sure to null the link before trying to delete the data.
1096 */
1097 if (typentry->domainData)
1098 {
1099 dcc = typentry->domainData;
1100 typentry->domainData = NULL;
1101 decr_dcc_refcount(dcc);
1102 }
1103
1104 /*
1105 * We try to optimize the common case of no domain constraints, so don't
1106 * create the dcc object and context until we find a constraint. Likewise
1107 * for the temp sorting array.
1108 */
1109 dcc = NULL;
1110 ccons = NULL;
1111 cconslen = 0;
1112
1113 /*
1114 * Scan pg_constraint for relevant constraints. We want to find
1115 * constraints for not just this domain, but any ancestor domains, so the
1116 * outer loop crawls up the domain stack.
1117 */
1118 conRel = table_open(ConstraintRelationId, AccessShareLock);
1119
1120 for (;;)
1121 {
1122 HeapTuple tup;
1123 HeapTuple conTup;
1124 Form_pg_type typTup;
1125 int nccons = 0;
1126 ScanKeyData key[1];
1127 SysScanDesc scan;
1128
1129 tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
1130 if (!HeapTupleIsValid(tup))
1131 elog(ERROR, "cache lookup failed for type %u", typeOid);
1132 typTup = (Form_pg_type) GETSTRUCT(tup);
1133
1134 if (typTup->typtype != TYPTYPE_DOMAIN)
1135 {
1136 /* Not a domain, so done */
1137 ReleaseSysCache(tup);
1138 break;
1139 }
1140
1141 /* Test for NOT NULL Constraint */
1142 if (typTup->typnotnull)
1143 notNull = true;
1144
1145 /* Look for CHECK Constraints on this domain */
1146 ScanKeyInit(&key[0],
1147 Anum_pg_constraint_contypid,
1148 BTEqualStrategyNumber, F_OIDEQ,
1149 ObjectIdGetDatum(typeOid));
1150
1151 scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1152 NULL, 1, key);
1153
1154 while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1155 {
 /* NOTE(review): the declaration of "c" (presumably
  * Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);)
  * is missing here. */
1157 Datum val;
1158 bool isNull;
1159 char *constring;
1160 Expr *check_expr;
1162
1163 /* Ignore non-CHECK constraints */
1164 if (c->contype != CONSTRAINT_CHECK)
1165 continue;
1166
1167 /* Not expecting conbin to be NULL, but we'll test for it anyway */
1168 val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1169 conRel->rd_att, &isNull);
1170 if (isNull)
1171 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1172 NameStr(typTup->typname), NameStr(c->conname));
1173
1174 /* Create the DomainConstraintCache object and context if needed */
1175 if (dcc == NULL)
1176 {
1177 MemoryContext cxt;
1178
 /* NOTE(review): the AllocSetContextCreate(...) and
  * MemoryContextAlloc(...) call lines are partially missing. */
1180 "Domain constraints",
1182 dcc = (DomainConstraintCache *)
1184 dcc->constraints = NIL;
1185 dcc->dccContext = cxt;
1186 dcc->dccRefCount = 0;
1187 }
1188
1189 /* Convert conbin to a node tree, still in caller's context */
1190 constring = TextDatumGetCString(val);
1191 check_expr = (Expr *) stringToNode(constring);
1192
1193 /*
1194 * Plan the expression, since ExecInitExpr will expect that.
1195 *
1196 * Note: caching the result of expression_planner() is not very
1197 * good practice. Ideally we'd use a CachedExpression here so
1198 * that we would react promptly to, eg, changes in inlined
1199 * functions. However, because we don't support mutable domain
1200 * CHECK constraints, it's not really clear that it's worth the
1201 * extra overhead to do that.
1202 */
1203 check_expr = expression_planner(check_expr);
1204
1205 /* Create only the minimally needed stuff in dccContext */
1206 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1207
 /* NOTE(review): the lines allocating "r" (makeNode(DomainConstraintState)
  * and setting r->constrainttype) are missing here. */
1210 r->name = pstrdup(NameStr(c->conname));
1211 r->check_expr = copyObject(check_expr);
1212 r->check_exprstate = NULL;
1213
1214 MemoryContextSwitchTo(oldcxt);
1215
1216 /* Accumulate constraints in an array, for sorting below */
1217 if (ccons == NULL)
1218 {
1219 cconslen = 8;
1220 ccons = (DomainConstraintState **)
1221 palloc(cconslen * sizeof(DomainConstraintState *));
1222 }
1223 else if (nccons >= cconslen)
1224 {
1225 cconslen *= 2;
1226 ccons = (DomainConstraintState **)
1227 repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1228 }
1229 ccons[nccons++] = r;
1230 }
1231
1232 systable_endscan(scan);
1233
1234 if (nccons > 0)
1235 {
1236 /*
1237 * Sort the items for this domain, so that CHECKs are applied in a
1238 * deterministic order.
1239 */
1240 if (nccons > 1)
1241 qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp)
1242
1243 /*
1244 * Now attach them to the overall list. Use lcons() here because
1245 * constraints of parent domains should be applied earlier.
1246 */
1247 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1248 while (nccons > 0)
1249 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1250 MemoryContextSwitchTo(oldcxt);
1251 }
1252
1253 /* loop to next domain in stack */
1254 typeOid = typTup->typbasetype;
1255 ReleaseSysCache(tup);
1256 }
1257
 /* NOTE(review): the table_close(conRel, AccessShareLock) line is missing. */
1259
1260 /*
1261 * Only need to add one NOT NULL check regardless of how many domains in
1262 * the stack request it.
1263 */
1264 if (notNull)
1265 {
1267
1268 /* Create the DomainConstraintCache object and context if needed */
1269 if (dcc == NULL)
1270 {
1271 MemoryContext cxt;
1272
1274 "Domain constraints",
1276 dcc = (DomainConstraintCache *)
1278 dcc->constraints = NIL;
1279 dcc->dccContext = cxt;
1280 dcc->dccRefCount = 0;
1281 }
1282
1283 /* Create node trees in DomainConstraintCache's context */
1284 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1285
1287
1289 r->name = pstrdup("NOT NULL");
1290 r->check_expr = NULL;
1291 r->check_exprstate = NULL;
1292
1293 /* lcons to apply the nullness check FIRST */
1294 dcc->constraints = lcons(r, dcc->constraints);
1295
1296 MemoryContextSwitchTo(oldcxt);
1297 }
1298
1299 /*
1300 * If we made a constraint object, move it into CacheMemoryContext and
1301 * attach it to the typcache entry.
1302 */
1303 if (dcc)
1304 {
 /* NOTE(review): the MemoryContextSetParent(dcc->dccContext,
  * CacheMemoryContext) line is missing here. */
1306 typentry->domainData = dcc;
1307 dcc->dccRefCount++; /* count the typcache's reference */
1308 }
1309
1310 /* Either way, the typcache entry's domain data is now valid. */
 /* NOTE(review): the line setting TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS in
  * typentry->flags is missing here. */
1312}
1313
1314/*
1315 * qsort comparator to sort DomainConstraintState pointers by name
1316 */
1317static int
1318dcs_cmp(const void *a, const void *b)
1319{
1320 const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1321 const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1322
1323 return strcmp((*ca)->name, (*cb)->name);
1324}
1325
1326/*
1327 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1328 * and free it if no references remain
1329 */
1330static void
/* NOTE(review): signature line missing (expected:
 * decr_dcc_refcount(DomainConstraintCache *dcc)). */
1332{
1333 Assert(dcc->dccRefCount > 0);
 /* When the last reference goes away, the whole cache is freed by deleting
  * its private memory context (that deletion line — presumably
  * MemoryContextDelete(dcc->dccContext) — is missing from this extraction). */
1334 if (--(dcc->dccRefCount) <= 0)
1336}
1337
1338/*
1339 * Context reset/delete callback for a DomainConstraintRef
1340 */
1341static void
/* NOTE(review): the signature line and the line deriving "ref" from the
 * callback argument (presumably a DomainConstraintRef * cast of arg) are
 * missing from this extraction. */
1343{
1345 DomainConstraintCache *dcc = ref->dcc;
1346
1347 /* Paranoia --- be sure link is nulled before trying to release */
1348 if (dcc)
1349 {
1350 ref->constraints = NIL;
1351 ref->dcc = NULL;
 /* Drop the ref's count on the cache; may free it if this was the last. */
1352 decr_dcc_refcount(dcc);
1353 }
1354}
1355
1356/*
1357 * prep_domain_constraints --- prepare domain constraints for execution
1358 *
1359 * The expression trees stored in the DomainConstraintCache's list are
1360 * converted to executable expression state trees stored in execctx.
1361 */
1362static List *
/* NOTE(review): signature line missing (expected:
 * prep_domain_constraints(List *constraints, MemoryContext execctx)). */
1364{
1365 List *result = NIL;
1366 MemoryContext oldcxt;
1367 ListCell *lc;
1368
 /* All new nodes and exprstates are allocated in the caller-supplied
  * execution context so they live exactly as long as the ref does. */
1369 oldcxt = MemoryContextSwitchTo(execctx);
1370
1371 foreach(lc, constraints)
1372 {
 /* NOTE(review): the lines fetching "r" from the list cell and allocating
  * "newr" (makeNode(DomainConstraintState)) are missing here. */
1375
 /* Shallow-copy the cached constraint, then build a fresh exprstate;
  * name/check_expr remain owned by the shared cache. */
1377 newr->constrainttype = r->constrainttype;
1378 newr->name = r->name;
1379 newr->check_expr = r->check_expr;
1380 newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1381
1382 result = lappend(result, newr);
1383 }
1384
1385 MemoryContextSwitchTo(oldcxt);
1386
1387 return result;
1388}
1389
1390/*
1391 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1392 *
1393 * Caller must tell us the MemoryContext in which the DomainConstraintRef
1394 * lives. The ref will be cleaned up when that context is reset/deleted.
1395 *
1396 * Caller must also tell us whether it wants check_exprstate fields to be
1397 * computed in the DomainConstraintState nodes attached to this ref.
1398 * If it doesn't, we need not make a copy of the DomainConstraintState list.
1399 */
1400void
/* NOTE(review): the first signature line (expected:
 * InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,)
 * is missing from this extraction. */
1402 MemoryContext refctx, bool need_exprstate)
1403{
1404 /* Look up the typcache entry --- we assume it survives indefinitely */
 /* NOTE(review): the lookup_type_cache(...) assignment to ref->tcache is
  * missing here. */
1406 ref->need_exprstate = need_exprstate;
1407 /* For safety, establish the callback before acquiring a refcount */
1408 ref->refctx = refctx;
1409 ref->dcc = NULL;
 /* NOTE(review): the line setting ref->callback.func (the context reset
  * callback, presumably dccref_deletion_callback) is missing here. */
1411 ref->callback.arg = ref;
 /* NOTE(review): the MemoryContextRegisterResetCallback(...) line is
  * missing here. */
1413 /* Acquire refcount if there are constraints, and set up exported list */
1414 if (ref->tcache->domainData)
1415 {
1416 ref->dcc = ref->tcache->domainData;
1417 ref->dcc->dccRefCount++;
1418 if (ref->need_exprstate)
 /* NOTE(review): the first line of the prep_domain_constraints(...) call
  * is missing; only its trailing argument survives below. */
1420 ref->refctx);
1421 else
1422 ref->constraints = ref->dcc->constraints;
1423 }
1424 else
1425 ref->constraints = NIL;
1426}
1427
1428/*
1429 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1430 *
1431 * If the domain's constraint set changed, ref->constraints is updated to
1432 * point at a new list of cached constraints.
1433 *
1434 * In the normal case where nothing happened to the domain, this is cheap
1435 * enough that it's reasonable (and expected) to check before *each* use
1436 * of the constraint info.
1437 */
1438void
/* NOTE(review): signature line missing (expected:
 * UpdateDomainConstraintRef(DomainConstraintRef *ref)). */
1440{
1441 TypeCacheEntry *typentry = ref->tcache;
1442
1443 /* Make sure typcache entry's data is up to date */
1444 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1445 typentry->typtype == TYPTYPE_DOMAIN)
1446 load_domaintype_info(typentry);
1447
1448 /* Transfer to ref object if there's new info, adjusting refcounts */
1449 if (ref->dcc != typentry->domainData)
1450 {
1451 /* Paranoia --- be sure link is nulled before trying to release */
1452 DomainConstraintCache *dcc = ref->dcc;
1453
1454 if (dcc)
1455 {
1456 /*
1457 * Note: we just leak the previous list of executable domain
1458 * constraints. Alternatively, we could keep those in a child
1459 * context of ref->refctx and free that context at this point.
1460 * However, in practice this code path will be taken so seldom
1461 * that the extra bookkeeping for a child context doesn't seem
1462 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1463 */
1464 ref->constraints = NIL;
1465 ref->dcc = NULL;
1466 decr_dcc_refcount(dcc);
1467 }
1468 dcc = typentry->domainData;
1469 if (dcc)
1470 {
1471 ref->dcc = dcc;
1472 dcc->dccRefCount++;
1473 if (ref->need_exprstate)
 /* NOTE(review): the first line of the prep_domain_constraints(...) call
  * (assigning ref->constraints) is missing; only its trailing argument
  * survives below. */
1475 ref->refctx);
1476 else
1477 ref->constraints = dcc->constraints;
1478 }
1479 }
1480}
1481
1482/*
1483 * DomainHasConstraints --- utility routine to check if a domain has constraints
1484 *
1485 * This is defined to return false, not fail, if type is not a domain.
1486 */
1487bool
/* NOTE(review): signature line missing (expected:
 * DomainHasConstraints(Oid type_id)). */
1489{
1490 TypeCacheEntry *typentry;
1491
1492 /*
1493 * Note: a side effect is to cause the typcache's domain data to become
1494 * valid. This is fine since we'll likely need it soon if there is any.
1495 */
 /* NOTE(review): the lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO)
  * assignment to typentry is missing here. */
1497
1498 return (typentry->domainData != NULL);
1499}
1500
1501
1502/*
1503 * array_element_has_equality and friends are helper routines to check
1504 * whether we should believe that array_eq and related functions will work
1505 * on the given array type or composite type.
1506 *
1507 * The logic above may call these repeatedly on the same type entry, so we
1508 * make use of the typentry->flags field to cache the results once known.
1509 * Also, we assume that we'll probably want all these facts about the type
1510 * if we want any, so we cache them all using only one lookup of the
1511 * component datatype(s).
1512 */
1513
1514static bool
/* Does this array type's element type have an equality operator?
 * NOTE(review): signature line and the cache_array_element_properties()
 * call line are missing from this extraction. */
1516{
1517 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1519 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1520}
1521
1522static bool
/* Does this array type's element type have a btree comparison function?
 * NOTE(review): signature line and the cache_array_element_properties()
 * call line are missing from this extraction. */
1524{
1525 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1527 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1528}
1529
1530static bool
/* Does this array type's element type have a hash function?
 * NOTE(review): signature line and the cache_array_element_properties()
 * call line are missing from this extraction. */
1532{
1533 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1535 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1536}
1537
1538static bool
/* Does this array type's element type have an extended (64-bit) hash function?
 * NOTE(review): signature line and the cache_array_element_properties()
 * call line are missing from this extraction. */
1540{
1541 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1543 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1544}
1545
1546static void
/* Look up the array's element type once and cache all of its
 * equality/compare/hash capabilities as TCFLAGS bits on this entry.
 * NOTE(review): signature line missing (expected:
 * cache_array_element_properties(TypeCacheEntry *typentry)). */
1548{
1549 Oid elem_type = get_base_element_type(typentry->type_id);
1550
1551 if (OidIsValid(elem_type))
1552 {
1553 TypeCacheEntry *elementry;
1554
 /* NOTE(review): the continuation lines of this lookup_type_cache() call
  * (the TYPECACHE_EQ_OPR | TYPECACHE_CMP_PROC | ... flag list) are
  * missing from this extraction. */
1555 elementry = lookup_type_cache(elem_type,
1560 if (OidIsValid(elementry->eq_opr))
1561 typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1562 if (OidIsValid(elementry->cmp_proc))
1563 typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1564 if (OidIsValid(elementry->hash_proc))
1565 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1566 if (OidIsValid(elementry->hash_extended_proc))
 /* NOTE(review): the line setting TCFLAGS_HAVE_ELEM_EXTENDED_HASHING is
  * missing here; likewise the final line setting
  * TCFLAGS_CHECKED_ELEM_PROPERTIES before the closing brace. */
1568 }
1570}
1571
1572/*
1573 * Likewise, some helper functions for composite types.
1574 */
1575
1576static bool
/* Do all (non-dropped) fields of this composite type support equality?
 * NOTE(review): signature line and the cache_record_field_properties()
 * call line are missing from this extraction. */
1578{
1579 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1581 return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1582}
1583
1584static bool
/* Do all (non-dropped) fields of this composite type support comparison?
 * NOTE(review): signature line and the cache_record_field_properties()
 * call line are missing from this extraction. */
1586{
1587 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1589 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1590}
1591
1592static bool
/* Do all (non-dropped) fields of this composite type support hashing?
 * NOTE(review): signature line and the cache_record_field_properties()
 * call line are missing from this extraction. */
1594{
1595 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1597 return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1598}
1599
1600static bool
/* Do all (non-dropped) fields of this composite type support extended hashing?
 * NOTE(review): signature line and the cache_record_field_properties()
 * call line are missing from this extraction. */
1602{
1603 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1605 return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1606}
1607
1608static void
/* Determine which of equality/compare/hash/extended-hash work for this
 * composite (or RECORD, or domain-over-composite) type, caching the answers
 * as TCFLAGS_HAVE_FIELD_* bits.  NOTE(review): the signature line and several
 * flag-list continuation lines are missing from this extraction; the final
 * line setting TCFLAGS_CHECKED_FIELD_PROPERTIES is also missing. */
1610{
1611 /*
1612 * For type RECORD, we can't really tell what will work, since we don't
1613 * have access here to the specific anonymous type. Just assume that
1614 * equality and comparison will (we may get a failure at runtime). We
1615 * could also claim that hashing works, but then if code that has the
1616 * option between a comparison-based (sort-based) and a hash-based plan
1617 * chooses hashing, stuff could fail that would otherwise work if it chose
1618 * a comparison-based plan. In practice more types support comparison
1619 * than hashing.
1620 */
1621 if (typentry->type_id == RECORDOID)
1622 {
1623 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
 /* NOTE(review): the TCFLAGS_HAVE_FIELD_COMPARE continuation line of this
  * bit-set is missing here. */
1625 }
1626 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1627 {
1628 TupleDesc tupdesc;
1629 int newflags;
1630 int i;
1631
1632 /* Fetch composite type's tupdesc if we don't have it already */
1633 if (typentry->tupDesc == NULL)
1634 load_typcache_tupdesc(typentry);
1635 tupdesc = typentry->tupDesc;
1636
1637 /* Must bump the refcount while we do additional catalog lookups */
1638 IncrTupleDescRefCount(tupdesc);
1639
1640 /* Have each property if all non-dropped fields have the property */
1641 newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
 /* NOTE(review): the remaining flag continuation lines of this
  * initializer are missing here. */
1645 for (i = 0; i < tupdesc->natts; i++)
1646 {
1647 TypeCacheEntry *fieldentry;
1648 Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1649
1650 if (attr->attisdropped)
1651 continue;
1652
 /* NOTE(review): the TYPECACHE_* flag continuation lines of this
  * lookup_type_cache() call are missing here. */
1653 fieldentry = lookup_type_cache(attr->atttypid,
1658 if (!OidIsValid(fieldentry->eq_opr))
1659 newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1660 if (!OidIsValid(fieldentry->cmp_proc))
1661 newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1662 if (!OidIsValid(fieldentry->hash_proc))
1663 newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
1664 if (!OidIsValid(fieldentry->hash_extended_proc))
1665 newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;
1666
1667 /* We can drop out of the loop once we disprove all bits */
1668 if (newflags == 0)
1669 break;
1670 }
1671 typentry->flags |= newflags;
1672
1673 DecrTupleDescRefCount(tupdesc);
1674 }
1675 else if (typentry->typtype == TYPTYPE_DOMAIN)
1676 {
1677 /* If it's domain over composite, copy base type's properties */
1678 TypeCacheEntry *baseentry;
1679
1680 /* load up basetype info if we didn't already */
1681 if (typentry->domainBaseType == InvalidOid)
1682 {
1683 typentry->domainBaseTypmod = -1;
1684 typentry->domainBaseType =
1685 getBaseTypeAndTypmod(typentry->type_id,
1686 &typentry->domainBaseTypmod);
1687 }
 /* NOTE(review): the flag continuation lines of this lookup_type_cache()
  * call are missing here. */
1688 baseentry = lookup_type_cache(typentry->domainBaseType,
1693 if (baseentry->typtype == TYPTYPE_COMPOSITE)
1694 {
 /* NOTE(review): a line (likely setting TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
  * and the flag-mask continuation lines below are missing here. */
1696 typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1700 }
1701 }
1703}
1704
1705/*
1706 * Likewise, some helper functions for range and multirange types.
1707 *
1708 * We can borrow the flag bits for array element properties to use for range
1709 * element properties, since those flag bits otherwise have no use in a
1710 * range or multirange type's typcache entry.
1711 */
1712
1713static bool
/* Does this range type's subtype have a hash function?  (Borrows the
 * ELEM flag bits; see comment above.)  NOTE(review): signature line and the
 * cache_range_element_properties() call line are missing. */
1715{
1716 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1718 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1719}
1720
1721static bool
/* Does this range type's subtype have an extended hash function?
 * NOTE(review): signature line and the cache_range_element_properties()
 * call line are missing. */
1723{
1724 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1726 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1727}
1728
1729static void
/* Cache hashability of this range type's subtype in the ELEM flag bits.
 * NOTE(review): signature line missing (expected:
 * cache_range_element_properties(TypeCacheEntry *typentry)). */
1731{
1732 /* load up subtype link if we didn't already */
1733 if (typentry->rngelemtype == NULL &&
1734 typentry->typtype == TYPTYPE_RANGE)
1735 load_rangetype_info(typentry);
1736
1737 if (typentry->rngelemtype != NULL)
1738 {
1739 TypeCacheEntry *elementry;
1740
1741 /* might need to calculate subtype's hash function properties */
 /* NOTE(review): the TYPECACHE_HASH_PROC | TYPECACHE_HASH_EXTENDED_PROC
  * continuation lines of this call are missing here. */
1742 elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1745 if (OidIsValid(elementry->hash_proc))
1746 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1747 if (OidIsValid(elementry->hash_extended_proc))
 /* NOTE(review): the TCFLAGS_HAVE_ELEM_EXTENDED_HASHING assignment line
  * and the final TCFLAGS_CHECKED_ELEM_PROPERTIES line are missing. */
1749 }
1751}
1752
1753static bool
/* Does this multirange's underlying range subtype have a hash function?
 * NOTE(review): signature line and the cache_multirange_element_properties()
 * call line are missing. */
1755{
1756 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1758 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1759}
1760
1761static bool
/* Does this multirange's underlying range subtype have an extended hash
 * function?  NOTE(review): signature line and the
 * cache_multirange_element_properties() call line are missing. */
1763{
1764 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1766 return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1767}
1768
1769static void
/* Cache hashability of the multirange's range subtype in the ELEM flag bits.
 * NOTE(review): signature line missing (expected:
 * cache_multirange_element_properties(TypeCacheEntry *typentry)). */
1771{
1772 /* load up range link if we didn't already */
1773 if (typentry->rngtype == NULL &&
1774 typentry->typtype == TYPTYPE_MULTIRANGE)
1775 load_multirangetype_info(typentry);
1776
1777 if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1778 {
1779 TypeCacheEntry *elementry;
1780
1781 /* might need to calculate subtype's hash function properties */
 /* NOTE(review): the TYPECACHE_HASH_PROC | TYPECACHE_HASH_EXTENDED_PROC
  * continuation lines of this call are missing here. */
1782 elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1785 if (OidIsValid(elementry->hash_proc))
1786 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1787 if (OidIsValid(elementry->hash_extended_proc))
 /* NOTE(review): the TCFLAGS_HAVE_ELEM_EXTENDED_HASHING assignment line
  * and the final TCFLAGS_CHECKED_ELEM_PROPERTIES line are missing. */
1789 }
1791}
1792
1793/*
1794 * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1795 * to store 'typmod'.
1796 */
1797static void
/* Grow RecordCacheArray (and, per the header comment, RecordIdentifierArray)
 * so that index 'typmod' is addressable.  NOTE(review): the signature line
 * (expected: ensure_record_cache_typmod_slot_exists(int32 typmod)) and the
 * MemoryContextAllocZero / repalloc0 call lines are missing from this
 * extraction. */
1799{
1800 if (RecordCacheArray == NULL)
1801 {
 /* First use: allocate an initial 64-slot, zeroed array (allocation call
  * lines missing; only the size expression survives). */
1804 64 * sizeof(RecordCacheArrayEntry));
1806 }
1807
1808 if (typmod >= RecordCacheArrayLen)
1809 {
 /* Grow geometrically to the next power of two above typmod. */
1810 int32 newlen = pg_nextpower2_32(typmod + 1);
1811
 /* NOTE(review): the repalloc0(...) call lines are missing; only the
  * trailing 'newlen' argument survives. */
1815 newlen);
1816 RecordCacheArrayLen = newlen;
1817 }
1818}
1819
1820/*
1821 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1822 *
1823 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1824 * hasn't had its refcount bumped.
1825 */
1826static TupleDesc
1827lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1828{
1829 if (type_id != RECORDOID)
1830 {
1831 /*
1832 * It's a named composite type, so use the regular typcache.
1833 */
1834 TypeCacheEntry *typentry;
1835
1836 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1837 if (typentry->tupDesc == NULL && !noError)
1838 ereport(ERROR,
1839 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1840 errmsg("type %s is not composite",
1841 format_type_be(type_id))));
1842 return typentry->tupDesc;
1843 }
1844 else
1845 {
1846 /*
1847 * It's a transient record type, so look in our record-type table.
1848 */
1849 if (typmod >= 0)
1850 {
1851 /* It is already in our local cache? */
1852 if (typmod < RecordCacheArrayLen &&
1853 RecordCacheArray[typmod].tupdesc != NULL)
1854 return RecordCacheArray[typmod].tupdesc;
1855
1856 /* Are we attached to a shared record typmod registry? */
 /* NOTE(review): the condition line (presumably testing
  * CurrentSession->shared_typmod_table != NULL) is missing here. */
1858 {
 /* NOTE(review): the declaration of "entry"
  * (SharedTypmodTableEntry *) is missing here. */
1860
1861 /* Try to find it in the shared typmod index. */
 /* NOTE(review): the first line of the dshash_find(...) call is
  * missing; only its trailing arguments survive. */
1863 &typmod, false);
1864 if (entry != NULL)
1865 {
1866 TupleDesc tupdesc;
1867
1868 tupdesc = (TupleDesc)
 /* NOTE(review): the dsa_get_address(CurrentSession->area, ...)
  * line of this cast expression is missing. */
1870 entry->shared_tupdesc);
1871 Assert(typmod == tupdesc->tdtypmod);
1872
1873 /* We may need to extend the local RecordCacheArray. */
 /* NOTE(review): the ensure_record_cache_typmod_slot_exists(typmod)
  * call line is missing here. */
1875
1876 /*
1877 * Our local array can now point directly to the TupleDesc
1878 * in shared memory, which is non-reference-counted.
1879 */
1880 RecordCacheArray[typmod].tupdesc = tupdesc;
1881 Assert(tupdesc->tdrefcount == -1);
1882
1883 /*
1884 * We don't share tupdesc identifiers across processes, so
1885 * assign one locally.
1886 */
 /* NOTE(review): the lines assigning RecordCacheArray[typmod].id
  * (from tupledesc_id_counter) and the dshash_release_lock(...)
  * call are missing here; only the trailing 'entry' argument of
  * the release call survives below. */
1888
1890 entry);
1891
1892 return RecordCacheArray[typmod].tupdesc;
1893 }
1894 }
1895 }
1896
1897 if (!noError)
1898 ereport(ERROR,
1899 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1900 errmsg("record type has not been registered")));
1901 return NULL;
1902 }
1903}
1904
1905/*
1906 * lookup_rowtype_tupdesc
1907 *
1908 * Given a typeid/typmod that should describe a known composite type,
1909 * return the tuple descriptor for the type. Will ereport on failure.
1910 * (Use ereport because this is reachable with user-specified OIDs,
1911 * for example from record_in().)
1912 *
1913 * Note: on success, we increment the refcount of the returned TupleDesc,
1914 * and log the reference in CurrentResourceOwner. Caller must call
1915 * ReleaseTupleDesc when done using the tupdesc. (There are some
1916 * cases in which the returned tupdesc is not refcounted, in which
1917 * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1918 * the tupdesc is guaranteed to live till process exit.)
1919 */
/* NOTE(review): the return-type and name lines (expected: TupleDesc
 * lookup_rowtype_tupdesc(Oid type_id, int32 typmod)) are missing from this
 * extraction; see the header comment above for the contract. */
1922{
1923 TupleDesc tupDesc;
1924
1925 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
 /* Bump refcount / register with the resource owner before returning. */
1926 PinTupleDesc(tupDesc);
1927 return tupDesc;
1928}
1929
1930/*
1931 * lookup_rowtype_tupdesc_noerror
1932 *
1933 * As above, but if the type is not a known composite type and noError
1934 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1935 * type_id is passed, you'll get an ereport anyway.)
1936 */
/* NOTE(review): the return-type line (TupleDesc) preceding this signature
 * is missing from this extraction. */
1938lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1939{
1940 TupleDesc tupDesc;
1941
1942 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
 /* Only pin on success; with noError=true a NULL result is legitimate. */
1943 if (tupDesc != NULL)
1944 PinTupleDesc(tupDesc);
1945 return tupDesc;
1946}
1947
1948/*
1949 * lookup_rowtype_tupdesc_copy
1950 *
1951 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1952 * copied into the CurrentMemoryContext and is not reference-counted.
1953 */
/* NOTE(review): the return-type and name lines (expected: TupleDesc
 * lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)) are missing from
 * this extraction; see the header comment above for the contract. */
1956{
1957 TupleDesc tmp;
1958
1959 tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
 /* Return a constraint-including copy in CurrentMemoryContext; no pinning
  * needed since the caller owns the copy outright. */
1960 return CreateTupleDescCopyConstr(tmp);
1961}
1962
1963/*
1964 * lookup_rowtype_tupdesc_domain
1965 *
1966 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1967 * a domain over a named composite type; so this is effectively equivalent to
1968 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1969 * except for being a tad faster.
1970 *
1971 * Note: the reason we don't fold the look-through-domain behavior into plain
1972 * lookup_rowtype_tupdesc() is that we want callers to know they might be
1973 * dealing with a domain. Otherwise they might construct a tuple that should
1974 * be of the domain type, but not apply domain constraints.
1975 */
/* NOTE(review): the return-type line (TupleDesc) preceding this signature
 * is missing from this extraction. */
1977lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1978{
1979 TupleDesc tupDesc;
1980
1981 if (type_id != RECORDOID)
1982 {
1983 /*
1984 * Check for domain or named composite type. We might as well load
1985 * whichever data is needed.
1986 */
1987 TypeCacheEntry *typentry;
1988
 /* NOTE(review): the TYPECACHE_TUPDESC | TYPECACHE_DOMAIN_BASE_INFO
  * continuation lines of this call are missing here. */
1989 typentry = lookup_type_cache(type_id,
1992 if (typentry->typtype == TYPTYPE_DOMAIN)
 /* NOTE(review): the line beginning the recursive
  * lookup_rowtype_tupdesc_noerror(typentry->domainBaseType, ...) return
  * is missing; only its trailing arguments survive below. */
1994 typentry->domainBaseTypmod,
1995 noError);
1996 if (typentry->tupDesc == NULL && !noError)
1997 ereport(ERROR,
1998 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1999 errmsg("type %s is not composite",
2000 format_type_be(type_id))));
2001 tupDesc = typentry->tupDesc;
2002 }
2003 else
2004 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2005 if (tupDesc != NULL)
2006 PinTupleDesc(tupDesc);
2007 return tupDesc;
2008}
2009
2010/*
2011 * Hash function for the hash table of RecordCacheEntry.
2012 */
2013static uint32
/* dynahash hash function for RecordCacheHash: hash the entry's tupdesc
 * row type.  'size' is required by the HashValueFunc API but unused. */
2014record_type_typmod_hash(const void *data, size_t size)
2015{
 /* NOTE(review): the declaration of "entry" (presumably
  * RecordCacheEntry *entry = (RecordCacheEntry *) data;) is missing. */
2017
2018 return hashRowType(entry->tupdesc);
2019}
2020
2021/*
2022 * Match function for the hash table of RecordCacheEntry.
2023 */
2024static int
/* dynahash match function for RecordCacheHash: 0 iff the two entries'
 * tupdescs describe the same row type.  'size' is unused (API requirement). */
2025record_type_typmod_compare(const void *a, const void *b, size_t size)
2026{
 /* NOTE(review): the declaration of "left" (presumably
  * RecordCacheEntry *left = (RecordCacheEntry *) a;) is missing. */
2028 RecordCacheEntry *right = (RecordCacheEntry *) b;
2029
2030 return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2031}
2032
2033/*
2034 * assign_record_type_typmod
2035 *
2036 * Given a tuple descriptor for a RECORD type, find or create a cache entry
2037 * for the type, and set the tupdesc's tdtypmod field to a value that will
2038 * identify this cache entry to lookup_rowtype_tupdesc.
2039 */
2040void
/* NOTE(review): signature line missing (expected:
 * assign_record_type_typmod(TupleDesc tupDesc)); several other lines inside
 * are also missing, as flagged below. */
2042{
2043 RecordCacheEntry *recentry;
2044 TupleDesc entDesc;
2045 bool found;
2046 MemoryContext oldcxt;
2047
2048 Assert(tupDesc->tdtypeid == RECORDOID);
2049
2050 if (RecordCacheHash == NULL)
2051 {
2052 /* First time through: initialize the hash table */
2053 HASHCTL ctl;
2054
2055 ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2056 ctl.entrysize = sizeof(RecordCacheEntry);
 /* NOTE(review): the lines assigning ctl.hash and ctl.match (to
  * record_type_typmod_hash / record_type_typmod_compare) are missing. */
2059 RecordCacheHash = hash_create("Record information cache", 64,
2060 &ctl,
 /* NOTE(review): the HASH_ELEM | HASH_FUNCTION | HASH_COMPARE flags
  * argument line of hash_create() is missing here. */
2062
2063 /* Also make sure CacheMemoryContext exists */
2064 if (!CacheMemoryContext)
 /* NOTE(review): the CreateCacheMemoryContext() call line is missing. */
2066 }
2067
2068 /*
2069 * Find a hashtable entry for this tuple descriptor. We don't use
2070 * HASH_ENTER yet, because if it's missing, we need to make sure that all
2071 * the allocations succeed before we create the new entry.
2072 */
 /* NOTE(review): the line beginning this hash_search() call (assigning
  * recentry) is missing; only its trailing arguments survive. */
2074 &tupDesc,
2075 HASH_FIND, &found);
2076 if (found && recentry->tupdesc != NULL)
2077 {
2078 tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2079 return;
2080 }
2081
2082 /* Not present, so need to manufacture an entry */
 /* NOTE(review): the MemoryContextSwitchTo(CacheMemoryContext) line
  * (assigning oldcxt) is missing here. */
2084
2085 /* Look in the SharedRecordTypmodRegistry, if attached */
2086 entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
2087 if (entDesc == NULL)
2088 {
2089 /*
2090 * Make sure we have room before we CreateTupleDescCopy() or advance
2091 * NextRecordTypmod.
2092 */
 /* NOTE(review): the ensure_record_cache_typmod_slot_exists(NextRecordTypmod)
  * call line is missing here. */
2094
2095 /* Reference-counted local cache only. */
2096 entDesc = CreateTupleDescCopy(tupDesc);
2097 entDesc->tdrefcount = 1;
2098 entDesc->tdtypmod = NextRecordTypmod++;
2099 }
2100 else
2101 {
 /* NOTE(review): the body of this else branch (presumably an
  * ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod) call) is
  * missing here. */
2103 }
2104
2105 RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2106
2107 /* Assign a unique tupdesc identifier, too. */
 /* NOTE(review): the line assigning RecordCacheArray[...].id from
  * ++tupledesc_id_counter is missing here. */
2109
2110 /* Fully initialized; create the hash table entry */
 /* NOTE(review): the line beginning this hash_search() call (assigning
  * recentry) is missing; only its trailing arguments survive. */
2112 &tupDesc,
2113 HASH_ENTER, NULL);
2114 recentry->tupdesc = entDesc;
2115
2116 /* Update the caller's tuple descriptor. */
2117 tupDesc->tdtypmod = entDesc->tdtypmod;
2118
2119 MemoryContextSwitchTo(oldcxt);
2120}
2121
2122/*
2123 * assign_record_type_identifier
2124 *
2125 * Get an identifier, which will be unique over the lifespan of this backend
2126 * process, for the current tuple descriptor of the specified composite type.
2127 * For named composite types, the value is guaranteed to change if the type's
2128 * definition does. For registered RECORD types, the value will not change
2129 * once assigned, since the registered type won't either. If an anonymous
2130 * RECORD type is specified, we return a new identifier on each call.
2131 */
2132uint64
/* NOTE(review): signature line missing (expected:
 * assign_record_type_identifier(Oid type_id, int32 typmod)). */
2134{
2135 if (type_id != RECORDOID)
2136 {
2137 /*
2138 * It's a named composite type, so use the regular typcache.
2139 */
2140 TypeCacheEntry *typentry;
2141
2142 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2143 if (typentry->tupDesc == NULL)
2144 ereport(ERROR,
2145 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2146 errmsg("type %s is not composite",
2147 format_type_be(type_id))));
2148 Assert(typentry->tupDesc_identifier != 0);
2149 return typentry->tupDesc_identifier;
2150 }
2151 else
2152 {
2153 /*
2154 * It's a transient record type, so look in our record-type table.
2155 */
2156 if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2157 RecordCacheArray[typmod].tupdesc != NULL)
2158 {
2159 Assert(RecordCacheArray[typmod].id != 0);
2160 return RecordCacheArray[typmod].id;
2161 }
2162
2163 /* For anonymous or unrecognized record type, generate a new ID */
2164 return ++tupledesc_id_counter;
2165 }
2166}
2167
2168/*
2169 * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2170 * This exists only to avoid exposing private innards of
2171 * SharedRecordTypmodRegistry in a header.
2172 */
2173size_t
/* NOTE(review): signature line missing (expected:
 * SharedRecordTypmodRegistryEstimate(void)); see header comment above. */
2175{
2176 return sizeof(SharedRecordTypmodRegistry);
2177}
2178
2179/*
2180 * Initialize 'registry' in a pre-existing shared memory region, which must be
2181 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2182 * bytes.
2183 *
2184 * 'area' will be used to allocate shared memory space as required for the
2185 * typemod registration. The current process, expected to be a leader process
2186 * in a parallel query, will be attached automatically and its current record
2187 * types will be loaded into *registry. While attached, all calls to
2188 * assign_record_type_typmod will use the shared registry. Worker backends
2189 * will need to attach explicitly.
2190 *
2191 * Note that this function takes 'area' and 'segment' as arguments rather than
2192 * accessing them via CurrentSession, because they aren't installed there
2193 * until after this function runs.
2194 */
2195void
/* NOTE(review): the first signature line (expected:
 * SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,)
 * is missing from this extraction; several interior lines are also missing,
 * flagged below. */
2197 dsm_segment *segment,
2198 dsa_area *area)
2199{
2200 MemoryContext old_context;
2201 dshash_table *record_table;
2202 dshash_table *typmod_table;
2203 int32 typmod;
2204
 /* NOTE(review): an Assert(CurrentSession != NULL) (or similar) line is
  * missing here. */
2206
2207 /* We can't already be attached to a shared registry. */
 /* NOTE(review): the Assert lines verifying shared_record_table /
  * shared_typmod_table are NULL, and the MemoryContextSwitchTo(TopMemoryContext)
  * assignment to old_context, are missing here. */
2211
2213
2214 /* Create the hash table of tuple descriptors indexed by themselves. */
2215 record_table = dshash_create(area, &srtr_record_table_params, area);
2216
2217 /* Create the hash table of tuple descriptors indexed by typmod. */
2218 typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2219
2220 MemoryContextSwitchTo(old_context);
2221
2222 /* Initialize the SharedRecordTypmodRegistry. */
2223 registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2224 registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
 /* NOTE(review): the line initializing registry's next_typmod counter
  * (presumably pg_atomic_init_u32(..., NextRecordTypmod)) is missing. */
2226
2227 /*
2228 * Copy all entries from this backend's private registry into the shared
2229 * registry.
2230 */
2231 for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2232 {
2233 SharedTypmodTableEntry *typmod_table_entry;
2234 SharedRecordTableEntry *record_table_entry;
2235 SharedRecordTableKey record_table_key;
2236 dsa_pointer shared_dp;
2237 TupleDesc tupdesc;
2238 bool found;
2239
2240 tupdesc = RecordCacheArray[typmod].tupdesc;
2241 if (tupdesc == NULL)
2242 continue;
2243
2244 /* Copy the TupleDesc into shared memory. */
2245 shared_dp = share_tupledesc(area, tupdesc, typmod);
2246
2247 /* Insert into the typmod table. */
2248 typmod_table_entry = dshash_find_or_insert(typmod_table,
2249 &tupdesc->tdtypmod,
2250 &found);
2251 if (found)
2252 elog(ERROR, "cannot create duplicate shared record typmod");
2253 typmod_table_entry->typmod = tupdesc->tdtypmod;
2254 typmod_table_entry->shared_tupdesc = shared_dp;
2255 dshash_release_lock(typmod_table, typmod_table_entry);
2256
2257 /* Insert into the record table. */
2258 record_table_key.shared = false;
2259 record_table_key.u.local_tupdesc = tupdesc;
2260 record_table_entry = dshash_find_or_insert(record_table,
2261 &record_table_key,
2262 &found);
2263 if (!found)
2264 {
2265 record_table_entry->key.shared = true;
2266 record_table_entry->key.u.shared_tupdesc = shared_dp;
2267 }
2268 dshash_release_lock(record_table, record_table_entry);
2269 }
2270
2271 /*
2272 * Set up the global state that will tell assign_record_type_typmod and
2273 * lookup_rowtype_tupdesc_internal about the shared registry.
2274 */
2275 CurrentSession->shared_record_table = record_table;
2276 CurrentSession->shared_typmod_table = typmod_table;
 /* NOTE(review): the line setting CurrentSession->shared_typmod_registry
  * (= registry) is missing here. */
2278
2279 /*
2280 * We install a detach hook in the leader, but only to handle cleanup on
2281 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2282 * the memory, the leader process will use a shared registry until it
2283 * exits.
2284 */
 /* NOTE(review): the on_dsm_detach(segment, shared_record_typmod_registry_detach, ...)
  * call line is missing here. */
2286}
2287
2288/*
2289 * Attach to 'registry', which must have been initialized already by another
2290 * backend. Future calls to assign_record_type_typmod and
2291 * lookup_rowtype_tupdesc_internal will use the shared registry until the
2292 * current session is detached.
2293 */
/*
 * Worker-side attach to a SharedRecordTypmodRegistry previously initialized
 * by the leader: attaches to the two dshash tables via the handles stored in
 * 'registry' and points the session state at them.
 *
 * NOTE(review): partial extraction — the function-name line, some asserts,
 * the memory-context switch, and the on_dsm_detach call are missing (gaps in
 * the embedded numbering).  Code is left byte-identical; confirm against the
 * canonical source before editing.
 */
2294void
2296{
2297 MemoryContext old_context;
2298 dshash_table *record_table;
2299 dshash_table *typmod_table;
2300
2302
2303 /* We can't already be attached to a shared registry. */
2304 Assert(CurrentSession != NULL);
2305 Assert(CurrentSession->segment != NULL);
2306 Assert(CurrentSession->area != NULL);
2310
2311 /*
2312 * We can't already have typmods in our local cache, because they'd clash
2313 * with those imported by SharedRecordTypmodRegistryInit. This should be
2314 * a freshly started parallel worker. If we ever support worker
2315 * recycling, a worker would need to zap its local cache in between
2316 * servicing different queries, in order to be able to call this and
2317 * synchronize typmods with a new leader; but that's problematic because
2318 * we can't be very sure that record-typmod-related state hasn't escaped
2319 * to anywhere else in the process.
2320 */
2322
2324
2325 /* Attach to the two hash tables. */
2326 record_table = dshash_attach(CurrentSession->area,
2328 registry->record_table_handle,
2330 typmod_table = dshash_attach(CurrentSession->area,
2332 registry->typmod_table_handle,
2333 NULL);
2334
2335 MemoryContextSwitchTo(old_context);
2336
2337 /*
2338 * Set up detach hook to run at worker exit. Currently this is the same
2339 * as the leader's detach hook, but in future they might need to be
2340 * different.
2341 */
2344 PointerGetDatum(registry));
2345
2346 /*
2347 * Set up the session state that will tell assign_record_type_typmod and
2348 * lookup_rowtype_tupdesc_internal about the shared registry.
2349 */
2351 CurrentSession->shared_record_table = record_table;
2352 CurrentSession->shared_typmod_table = typmod_table;
2353}
2354
2355/*
2356 * InvalidateCompositeTypeCacheEntry
2357 * Invalidate particular TypeCacheEntry on Relcache inval callback
2358 *
2359 * Delete the cached tuple descriptor (if any) for the given composite
2360 * type, and reset whatever info we have cached about the composite type's
2361 * comparability.
2362 */
/*
 * Drop the cached tuple descriptor and operator-lookup flags of one
 * composite-type typcache entry, in response to a relcache invalidation.
 *
 * NOTE(review): partial extraction — the function-name/parameter line and
 * the trailing delete_rel_type_cache_if_needed() call are missing (gaps in
 * the embedded numbering).  Code is left byte-identical; confirm against
 * the canonical source before editing.
 */
2363static void
2365{
2366 bool hadTupDescOrOpclass;
2367
2368 Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2369 OidIsValid(typentry->typrelid));
2370
/* Remember whether there was anything to clear, so we only do the
 * RelIdToTypeIdCacheHash maintenance when state actually changed. */
2371 hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2372 (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2373
2374 /* Delete tupdesc if we have it */
2375 if (typentry->tupDesc != NULL)
2376 {
2377 /*
2378 * Release our refcount and free the tupdesc if none remain. We can't
2379 * use DecrTupleDescRefCount here because this reference is not logged
2380 * by the current resource owner.
2381 */
2382 Assert(typentry->tupDesc->tdrefcount > 0);
2383 if (--typentry->tupDesc->tdrefcount == 0)
2384 FreeTupleDesc(typentry->tupDesc);
2385 typentry->tupDesc = NULL;
2386
2387 /*
2388 * Also clear tupDesc_identifier, so that anyone watching it will
2389 * realize that the tupdesc has changed.
2390 */
2391 typentry->tupDesc_identifier = 0;
2392 }
2393
2394 /* Reset equality/comparison/hashing validity information */
2395 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2396
2397 /*
2398 * Call delete_rel_type_cache_if_needed() if we actually cleared
2399 * something.
2400 */
2401 if (hadTupDescOrOpclass)
2403}
2404
2405/*
2406 * TypeCacheRelCallback
2407 * Relcache inval callback function
2408 *
2409 * Delete the cached tuple descriptor (if any) for the given rel's composite
2410 * type, or for all composite types if relid == InvalidOid. Also reset
2411 * whatever info we have cached about the composite type's comparability.
2412 *
2413 * This is called when a relcache invalidation event occurs for the given
2414 * relid. We can't use syscache to find a type corresponding to the given
2415 * relation because the code can be called outside of transaction. Thus, we
2416 * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2417 */
/*
 * Relcache invalidation callback: invalidate the typcache entry for the
 * composite type of the given relation (found via RelIdToTypeIdCacheHash),
 * or all composite types when relid is InvalidOid, and reset the operator
 * flags of domain-type entries that may sit over composites.
 *
 * NOTE(review): partial extraction — the function-name/parameter line, the
 * hash_search() calls on RelIdToTypeIdCacheHash/TypeCacheHash, and the
 * InvalidateCompositeTypeCacheEntry() call sites are missing (gaps in the
 * embedded numbering).  Code is left byte-identical; confirm against the
 * canonical source before editing.
 */
2418static void
2420{
2421 TypeCacheEntry *typentry;
2422
2423 /*
2424 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2425 * callback wouldn't be registered
2426 */
2427 if (OidIsValid(relid))
2428 {
2429 RelIdToTypeIdCacheEntry *relentry;
2430
2431 /*
2432 * Find an RelIdToTypeIdCacheHash entry, which should exist as soon as
2433 * corresponding typcache entry has something to clean.
2434 */
2436 &relid,
2437 HASH_FIND, NULL);
2438
2439 if (relentry != NULL)
2440 {
2442 &relentry->composite_typid,
2443 HASH_FIND, NULL);
2444
2445 if (typentry != NULL)
2446 {
2447 Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2448 Assert(relid == typentry->typrelid);
2449
2451 }
2452 }
2453
2454 /*
2455 * Visit all the domain types sequentially. Typically, this shouldn't
2456 * affect performance since domain types are less prone to bloat.
2457 * Domain types are created manually, unlike composite types which are
2458 * automatically created for every temporary table.
2459 */
2460 for (typentry = firstDomainTypeEntry;
2461 typentry != NULL;
2462 typentry = typentry->nextDomain)
2463 {
2464 /*
2465 * If it's domain over composite, reset flags. (We don't bother
2466 * trying to determine whether the specific base type needs a
2467 * reset.) Note that if we haven't determined whether the base
2468 * type is composite, we don't need to reset anything.
2469 */
2471 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2472 }
2473 }
2474 else
2475 {
2476 HASH_SEQ_STATUS status;
2477
2478 /*
2479 * Relid is invalid. By convention, we need to reset all composite
2480 * types in cache. Also, we should reset flags for domain types, and
2481 * we loop over all entries in hash, so, do it in a single scan.
2482 */
2483 hash_seq_init(&status, TypeCacheHash);
2484 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2485 {
2486 if (typentry->typtype == TYPTYPE_COMPOSITE)
2487 {
2489 }
2490 else if (typentry->typtype == TYPTYPE_DOMAIN)
2491 {
2492 /*
2493 * If it's domain over composite, reset flags. (We don't
2494 * bother trying to determine whether the specific base type
2495 * needs a reset.) Note that if we haven't determined whether
2496 * the base type is composite, we don't need to reset
2497 * anything.
2498 */
2500 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2501 }
2502 }
2503 }
2504}
2505
2506/*
2507 * TypeCacheTypCallback
2508 * Syscache inval callback function
2509 *
2510 * This is called when a syscache invalidation event occurs for any
2511 * pg_type row. If we have information cached about that type, mark
2512 * it as needing to be reloaded.
2513 */
/*
 * Syscache invalidation callback for pg_type: mark the matching typcache
 * entries (all of them, when hashvalue == 0) as needing their pg_type data
 * reloaded.
 *
 * NOTE(review): partial extraction — the continuation of the flag mask at
 * original line 2544 and the delete_rel_type_cache_if_needed() call at 2551
 * are missing (gaps in the embedded numbering).  Code is left byte-identical;
 * confirm against the canonical source before editing.
 */
2514static void
2515TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2516{
2517 HASH_SEQ_STATUS status;
2518 TypeCacheEntry *typentry;
2519
2520 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2521
2522 /*
2523 * By convention, zero hash value is passed to the callback as a sign that
2524 * it's time to invalidate the whole cache. See sinval.c, inval.c and
2525 * InvalidateSystemCachesExtended().
2526 */
2527 if (hashvalue == 0)
2528 hash_seq_init(&status, TypeCacheHash);
2529 else
2530 hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2531
2532 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2533 {
2534 bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2535
2536 Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2537
2538 /*
2539 * Mark the data obtained directly from pg_type as invalid. Also, if
2540 * it's a domain, typnotnull might've changed, so we'll need to
2541 * recalculate its constraints.
2542 */
2543 typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2545
2546 /*
2547 * Call delete_rel_type_cache_if_needed() if we cleaned
2548 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2549 */
2550 if (hadPgTypeData)
2552 }
2553}
2554
2555/*
2556 * TypeCacheOpcCallback
2557 * Syscache inval callback function
2558 *
2559 * This is called when a syscache invalidation event occurs for any pg_opclass
2560 * row. In principle we could probably just invalidate data dependent on the
2561 * particular opclass, but since updates on pg_opclass are rare in production
2562 * it doesn't seem worth a lot of complication: we just mark all cached data
2563 * invalid.
2564 *
2565 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2566 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2567 * is not allowed to be used to add/drop the primary operators and functions
2568 * of an opclass, only cross-type members of a family; and the latter sorts
2569 * of members are not going to get cached here.
2570 */
/*
 * Syscache invalidation callback for pg_opclass: reset the cached
 * equality/comparison/hashing operator flags of every typcache entry.
 *
 * NOTE(review): partial extraction — the delete_rel_type_cache_if_needed()
 * call at original line 2591 is missing.  Code is left byte-identical;
 * confirm against the canonical source before editing.
 */
2571static void
2572TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2573{
2574 HASH_SEQ_STATUS status;
2575 TypeCacheEntry *typentry;
2576
2577 /* TypeCacheHash must exist, else this callback wouldn't be registered */
2578 hash_seq_init(&status, TypeCacheHash);
2579 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2580 {
2581 bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2582
2583 /* Reset equality/comparison/hashing validity information */
2584 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2585
2586 /*
2587 * Call delete_rel_type_cache_if_needed() if we actually cleared some
2588 * of TCFLAGS_OPERATOR_FLAGS.
2589 */
2590 if (hadOpclass)
2592 }
2593}
2594
2595/*
2596 * TypeCacheConstrCallback
2597 * Syscache inval callback function
2598 *
2599 * This is called when a syscache invalidation event occurs for any
2600 * pg_constraint row. We flush information about domain constraints
2601 * when this happens.
2602 *
2603 * It's slightly annoying that we can't tell whether the inval event was for
2604 * a domain constraint record or not; there's usually more update traffic
2605 * for table constraints than domain constraints, so we'll do a lot of
2606 * useless flushes. Still, this is better than the old no-caching-at-all
2607 * approach to domain constraints.
2608 */
2609static void
2610TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2611{
2612 TypeCacheEntry *typentry;
2613
2614 /*
2615 * Because this is called very frequently, and typically very few of the
2616 * typcache entries are for domains, we don't use hash_seq_search here.
2617 * Instead we thread all the domain-type entries together so that we can
2618 * visit them cheaply.
2619 */
2620 for (typentry = firstDomainTypeEntry;
2621 typentry != NULL;
2622 typentry = typentry->nextDomain)
2623 {
2624 /* Reset domain constraint validity information */
2625 typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2626 }
2627}
2628
2629
2630/*
2631 * Check if given OID is part of the subset that's sortable by comparisons
2632 */
/*
 * Return true if 'arg' lies in the bitmap of enum member OIDs known to be
 * in sort order, so a plain OID comparison suffices.  The bitmap stores
 * offsets relative to bitmap_base; offsets beyond INT_MAX cannot be bitmap
 * members, hence the explicit range check.
 *
 * NOTE(review): the function-name/parameter line (original 2634) is missing
 * from this extraction; confirm against the canonical source.
 */
2633static inline bool
2635{
2636 Oid offset;
2637
2638 if (arg < enumdata->bitmap_base)
2639 return false;
2640 offset = arg - enumdata->bitmap_base;
2641 if (offset > (Oid) INT_MAX)
2642 return false;
2643 return bms_is_member((int) offset, enumdata->sorted_values);
2644}
2645
2646
2647/*
2648 * compare_values_of_enum
2649 * Compare two members of an enum type.
2650 * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2651 *
2652 * Note: currently, the enumData cache is refreshed only if we are asked
2653 * to compare an enum value that is not already in the cache. This is okay
2654 * because there is no support for re-ordering existing values, so comparisons
2655 * of previously cached values will return the right answer even if other
2656 * values have been added since we last loaded the cache.
2657 *
2658 * Note: the enum logic has a special-case rule about even-numbered versus
2659 * odd-numbered OIDs, but we take no account of that rule here; this
2660 * routine shouldn't even get called when that rule applies.
2661 */
/*
 * Three-way comparison of two enum member OIDs (arg1, arg2) of the enum
 * type cached in 'tcache'.  Fast path: if both OIDs are in the known-sorted
 * subset, compare the OIDs directly.  Slow path: binary-search both in the
 * cached EnumItem array and compare sort_order; the cache is reloaded once
 * if either value is not found (the enum may have gained members).
 *
 * NOTE(review): the function-name/parameter line (original 2663) is missing
 * from this extraction; confirm against the canonical source.
 */
2662int
2664{
2665 TypeCacheEnumData *enumdata;
2666 EnumItem *item1;
2667 EnumItem *item2;
2668
2669 /*
2670 * Equal OIDs are certainly equal --- this case was probably handled by
2671 * our caller, but we may as well check.
2672 */
2673 if (arg1 == arg2)
2674 return 0;
2675
2676 /* Load up the cache if first time through */
2677 if (tcache->enumData == NULL)
2678 load_enum_cache_data(tcache);
2679 enumdata = tcache->enumData;
2680
2681 /*
2682 * If both OIDs are known-sorted, we can just compare them directly.
2683 */
2684 if (enum_known_sorted(enumdata, arg1) &&
2685 enum_known_sorted(enumdata, arg2))
2686 {
/* arg1 == arg2 was handled above, so strictly less-or-greater here */
2687 if (arg1 < arg2)
2688 return -1;
2689 else
2690 return 1;
2691 }
2692
2693 /*
2694 * Slow path: we have to identify their actual sort-order positions.
2695 */
2696 item1 = find_enumitem(enumdata, arg1);
2697 item2 = find_enumitem(enumdata, arg2);
2698
2699 if (item1 == NULL || item2 == NULL)
2700 {
2701 /*
2702 * We couldn't find one or both values. That means the enum has
2703 * changed under us, so re-initialize the cache and try again. We
2704 * don't bother retrying the known-sorted case in this path.
2705 */
2706 load_enum_cache_data(tcache);
2707 enumdata = tcache->enumData;
2708
2709 item1 = find_enumitem(enumdata, arg1);
2710 item2 = find_enumitem(enumdata, arg2);
2711
2712 /*
2713 * If we still can't find the values, complain: we must have corrupt
2714 * data.
2715 */
2716 if (item1 == NULL)
2717 elog(ERROR, "enum value %u not found in cache for enum %s",
2718 arg1, format_type_be(tcache->type_id));
2719 if (item2 == NULL)
2720 elog(ERROR, "enum value %u not found in cache for enum %s",
2721 arg2, format_type_be(tcache->type_id));
2722 }
2723
2724 if (item1->sort_order < item2->sort_order)
2725 return -1;
2726 else if (item1->sort_order > item2->sort_order)
2727 return 1;
2728 else
2729 return 0;
2730}
2731
2732/*
2733 * Load (or re-load) the enumData member of the typcache entry.
2734 */
/*
 * Build (or rebuild) tcache->enumData: scan pg_enum for all members of the
 * enum type, sort them by OID, heuristically identify the longest OID-sorted
 * subsequence (stored as a bitmap, enabling fast OID-only comparisons), and
 * install the finished structure in CacheMemoryContext.
 *
 * NOTE(review): partial extraction — the function-name/parameter line
 * (original 2736) and the "oldcxt = MemoryContextSwitchTo(...)" statement at
 * original 2868 are missing.  Code is left byte-identical; confirm against
 * the canonical source before editing.
 */
2735static void
2737{
2738 TypeCacheEnumData *enumdata;
2739 Relation enum_rel;
2740 SysScanDesc enum_scan;
2741 HeapTuple enum_tuple;
2742 ScanKeyData skey;
2743 EnumItem *items;
2744 int numitems;
2745 int maxitems;
2746 Oid bitmap_base;
2747 Bitmapset *bitmap;
2748 MemoryContext oldcxt;
2749 int bm_size,
2750 start_pos;
2751
2752 /* Check that this is actually an enum */
2753 if (tcache->typtype != TYPTYPE_ENUM)
2754 ereport(ERROR,
2755 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2756 errmsg("%s is not an enum",
2757 format_type_be(tcache->type_id))));
2758
2759 /*
2760 * Read all the information for members of the enum type. We collect the
2761 * info in working memory in the caller's context, and then transfer it to
2762 * permanent memory in CacheMemoryContext. This minimizes the risk of
2763 * leaking memory from CacheMemoryContext in the event of an error partway
2764 * through.
2765 */
2766 maxitems = 64;
2767 items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2768 numitems = 0;
2769
2770 /* Scan pg_enum for the members of the target enum type. */
2771 ScanKeyInit(&skey,
2772 Anum_pg_enum_enumtypid,
2773 BTEqualStrategyNumber, F_OIDEQ,
2774 ObjectIdGetDatum(tcache->type_id));
2775
2776 enum_rel = table_open(EnumRelationId, AccessShareLock);
2777 enum_scan = systable_beginscan(enum_rel,
2778 EnumTypIdLabelIndexId,
2779 true, NULL,
2780 1, &skey);
2781
2782 while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2783 {
2784 Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2785
/* Grow the working array geometrically as needed */
2786 if (numitems >= maxitems)
2787 {
2788 maxitems *= 2;
2789 items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2790 }
2791 items[numitems].enum_oid = en->oid;
2792 items[numitems].sort_order = en->enumsortorder;
2793 numitems++;
2794 }
2795
2796 systable_endscan(enum_scan);
2797 table_close(enum_rel, AccessShareLock);
2798
2799 /* Sort the items into OID order */
2800 qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2801
2802 /*
2803 * Here, we create a bitmap listing a subset of the enum's OIDs that are
2804 * known to be in order and can thus be compared with just OID comparison.
2805 *
2806 * The point of this is that the enum's initial OIDs were certainly in
2807 * order, so there is some subset that can be compared via OID comparison;
2808 * and we'd rather not do binary searches unnecessarily.
2809 *
2810 * This is somewhat heuristic, and might identify a subset of OIDs that
2811 * isn't exactly what the type started with. That's okay as long as the
2812 * subset is correctly sorted.
2813 */
2814 bitmap_base = InvalidOid;
2815 bitmap = NULL;
2816 bm_size = 1; /* only save sets of at least 2 OIDs */
2817
2818 for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2819 {
2820 /*
2821 * Identify longest sorted subsequence starting at start_pos
2822 */
2823 Bitmapset *this_bitmap = bms_make_singleton(0);
2824 int this_bm_size = 1;
2825 Oid start_oid = items[start_pos].enum_oid;
2826 float4 prev_order = items[start_pos].sort_order;
2827 int i;
2828
2829 for (i = start_pos + 1; i < numitems; i++)
2830 {
2831 Oid offset;
2832
2833 offset = items[i].enum_oid - start_oid;
2834 /* quit if bitmap would be too large; cutoff is arbitrary */
2835 if (offset >= 8192)
2836 break;
2837 /* include the item if it's in-order */
2838 if (items[i].sort_order > prev_order)
2839 {
2840 prev_order = items[i].sort_order;
2841 this_bitmap = bms_add_member(this_bitmap, (int) offset);
2842 this_bm_size++;
2843 }
2844 }
2845
2846 /* Remember it if larger than previous best */
2847 if (this_bm_size > bm_size)
2848 {
2849 bms_free(bitmap);
2850 bitmap_base = start_oid;
2851 bitmap = this_bitmap;
2852 bm_size = this_bm_size;
2853 }
2854 else
2855 bms_free(this_bitmap);
2856
2857 /*
2858 * Done if it's not possible to find a longer sequence in the rest of
2859 * the list. In typical cases this will happen on the first
2860 * iteration, which is why we create the bitmaps on the fly instead of
2861 * doing a second pass over the list.
2862 */
2863 if (bm_size >= (numitems - start_pos - 1))
2864 break;
2865 }
2866
2867 /* OK, copy the data into CacheMemoryContext */
2869 enumdata = (TypeCacheEnumData *)
2870 palloc(offsetof(TypeCacheEnumData, enum_values) +
2871 numitems * sizeof(EnumItem));
2872 enumdata->bitmap_base = bitmap_base;
2873 enumdata->sorted_values = bms_copy(bitmap);
2874 enumdata->num_values = numitems;
2875 memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2876 MemoryContextSwitchTo(oldcxt);
2877
/* Free the working copies made in the caller's context */
2878 pfree(items);
2879 bms_free(bitmap);
2880
2881 /* And link the finished cache struct into the typcache */
2882 if (tcache->enumData != NULL)
2883 pfree(tcache->enumData);
2884 tcache->enumData = enumdata;
2885}
2886
2887/*
2888 * Locate the EnumItem with the given OID, if present
2889 */
/*
 * Binary-search the OID-sorted enum_values array for the EnumItem with
 * OID 'arg'; returns NULL if not present.
 *
 * NOTE(review): the function-name/parameter line (original 2891) is missing
 * from this extraction; confirm against the canonical source.
 */
2890static EnumItem *
2892{
2893 EnumItem srch;
2894
2895 /* On some versions of Solaris, bsearch of zero items dumps core */
2896 if (enumdata->num_values <= 0)
2897 return NULL;
2898
2899 srch.enum_oid = arg;
2900 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2901 sizeof(EnumItem), enum_oid_cmp);
2902}
2903
2904/*
2905 * qsort comparison function for OID-ordered EnumItems
2906 */
2907static int
2908enum_oid_cmp(const void *left, const void *right)
2909{
2910 const EnumItem *l = (const EnumItem *) left;
2911 const EnumItem *r = (const EnumItem *) right;
2912
2913 return pg_cmp_u32(l->enum_oid, r->enum_oid);
2914}
2915
2916/*
2917 * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2918 * to the given value and return a dsa_pointer.
2919 */
/*
 * Allocate space in 'area', copy 'tupdesc' into it, stamp the copy with
 * 'typmod', and return the dsa_pointer to the shared copy.
 *
 * NOTE(review): the function-name/parameter line (original 2921) is missing
 * from this extraction; confirm against the canonical source.
 */
2920static dsa_pointer
2922{
2923 dsa_pointer shared_dp;
2924 TupleDesc shared;
2925
2926 shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2927 shared = (TupleDesc) dsa_get_address(area, shared_dp);
2928 TupleDescCopy(shared, tupdesc);
2929 shared->tdtypmod = typmod;
2930
2931 return shared_dp;
2932}
2933
2934/*
2935 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2936 * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2937 * Tuple descriptors returned by this function are not reference counted, and
2938 * will exist at least as long as the current backend remains attached to the
2939 * current session.
2940 */
/*
 * Look up 'tupdesc' in the session's shared record table; if absent, assign
 * a fresh typmod, copy the descriptor into shared memory, and register it in
 * both the typmod table and the record table.  Handles the race where another
 * backend inserts a matching descriptor concurrently (we then free our copy
 * and use theirs).  Returns NULL when no shared registry is attached.
 *
 * NOTE(review): partial extraction — the function-name/parameter line, the
 * 'key' declaration, the dshash_find/dshash_find_or_insert/dshash_delete_key
 * call heads, the pg_atomic_fetch_add_u32 on the registry's typmod counter,
 * and the dshash_release_lock calls are missing (gaps in the embedded
 * numbering).  Code is left byte-identical; confirm against the canonical
 * source before editing.
 */
2941static TupleDesc
2943{
2944 TupleDesc result;
2946 SharedRecordTableEntry *record_table_entry;
2947 SharedTypmodTableEntry *typmod_table_entry;
2948 dsa_pointer shared_dp;
2949 bool found;
2950 uint32 typmod;
2951
2952 /* If not even attached, nothing to do. */
2954 return NULL;
2955
2956 /* Try to find a matching tuple descriptor in the record table. */
2957 key.shared = false;
2958 key.u.local_tupdesc = tupdesc;
2959 record_table_entry = (SharedRecordTableEntry *)
2961 if (record_table_entry)
2962 {
2963 Assert(record_table_entry->key.shared);
2965 record_table_entry);
2966 result = (TupleDesc)
2968 record_table_entry->key.u.shared_tupdesc);
2969 Assert(result->tdrefcount == -1);
2970
2971 return result;
2972 }
2973
2974 /* Allocate a new typmod number. This will be wasted if we error out. */
2975 typmod = (int)
2977 1);
2978
2979 /* Copy the TupleDesc into shared memory. */
2980 shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2981
2982 /*
2983 * Create an entry in the typmod table so that others will understand this
2984 * typmod number.
2985 */
2986 PG_TRY();
2987 {
2988 typmod_table_entry = (SharedTypmodTableEntry *)
2990 &typmod, &found);
2991 if (found)
2992 elog(ERROR, "cannot create duplicate shared record typmod");
2993 }
2994 PG_CATCH();
2995 {
/* On error, give back the shared-memory copy before re-throwing */
2996 dsa_free(CurrentSession->area, shared_dp);
2997 PG_RE_THROW();
2998 }
2999 PG_END_TRY();
3000 typmod_table_entry->typmod = typmod;
3001 typmod_table_entry->shared_tupdesc = shared_dp;
3003 typmod_table_entry);
3004
3005 /*
3006 * Finally create an entry in the record table so others with matching
3007 * tuple descriptors can reuse the typmod.
3008 */
3009 record_table_entry = (SharedRecordTableEntry *)
3011 &found);
3012 if (found)
3013 {
3014 /*
3015 * Someone concurrently inserted a matching tuple descriptor since the
3016 * first time we checked. Use that one instead.
3017 */
3019 record_table_entry);
3020
3021 /* Might as well free up the space used by the one we created. */
3023 &typmod);
3024 Assert(found);
3025 dsa_free(CurrentSession->area, shared_dp);
3026
3027 /* Return the one we found. */
3028 Assert(record_table_entry->key.shared);
3029 result = (TupleDesc)
3031 record_table_entry->key.u.shared_tupdesc);
3032 Assert(result->tdrefcount == -1);
3033
3034 return result;
3035 }
3036
3037 /* Store it and return it. */
3038 record_table_entry->key.shared = true;
3039 record_table_entry->key.u.shared_tupdesc = shared_dp;
3041 record_table_entry);
3042 result = (TupleDesc)
3043 dsa_get_address(CurrentSession->area, shared_dp);
3044 Assert(result->tdrefcount == -1);
3045
3046 return result;
3047}
3048
3049/*
3050 * On-DSM-detach hook to forget about the current shared record typmod
3051 * infrastructure. This is currently used by both leader and workers.
3052 */
/*
 * On-DSM-detach hook (per the comment above): forgets the session's shared
 * record-typmod state.  Used by both leader and workers.
 *
 * NOTE(review): only the shell of this function survived extraction — the
 * name/parameter line, the conditions, and the statements that detach the
 * two dshash tables and null out the session pointers are missing.  Code is
 * left byte-identical; confirm against the canonical source before editing.
 */
3053static void
3055{
3056 /* Be cautious here: maybe we didn't finish initializing. */
3058 {
3061 }
3063 {
3066 }
3068}
3069
3070/*
3071 * Insert RelIdToTypeIdCacheHash entry if needed.
3072 */
/*
 * Ensure a RelIdToTypeIdCacheHash entry (relid -> composite type OID) exists
 * for a composite-type typcache entry that carries cached state needing
 * relcache-invalidation cleanup.  No-op for non-composite types.
 *
 * NOTE(review): partial extraction — the function-name/parameter line and
 * the hash_search() call head (original 3094) are missing.  Code is left
 * byte-identical; confirm against the canonical source before editing.
 */
3073static void
3075{
3076 /* Immediately quit for non-composite types */
3077 if (typentry->typtype != TYPTYPE_COMPOSITE)
3078 return;
3079
3080 /* typrelid should be given for composite types */
3081 Assert(OidIsValid(typentry->typrelid));
3082
3083 /*
3084 * Insert a RelIdToTypeIdCacheHash entry if the typentry has any
3085 * information indicating it should be here.
3086 */
3087 if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3088 (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3089 typentry->tupDesc != NULL)
3090 {
3091 RelIdToTypeIdCacheEntry *relentry;
3092 bool found;
3093
3095 &typentry->typrelid,
3096 HASH_ENTER, &found);
3097 relentry->relid = typentry->typrelid;
3098 relentry->composite_typid = typentry->type_id;
3099 }
3100}
3101
3102/*
3103 * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
3104 * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
3105 * or tupDesc.
3106 */
/*
 * Drop the RelIdToTypeIdCacheHash entry for a composite-type typcache entry
 * once it no longer carries any cached state (pg_type data, operator flags,
 * or tupdesc).  In assert builds, also verifies the expected presence or
 * absence of the entry, tolerating entries whose lookup is still in progress.
 *
 * NOTE(review): partial extraction — the function-name/parameter line and
 * the hash_search() call heads (original 3141, 3157) are missing.  Code is
 * left byte-identical; confirm against the canonical source before editing.
 */
3107static void
3109{
3110#ifdef USE_ASSERT_CHECKING
3111 int i;
3112 bool is_in_progress = false;
3113
/* Entries mid-lookup may legitimately lack a RelIdToTypeIdCacheHash row */
3114 for (i = 0; i < in_progress_list_len; i++)
3115 {
3116 if (in_progress_list[i] == typentry->type_id)
3117 {
3118 is_in_progress = true;
3119 break;
3120 }
3121 }
3122#endif
3123
3124 /* Immediately quit for non-composite types */
3125 if (typentry->typtype != TYPTYPE_COMPOSITE)
3126 return;
3127
3128 /* typrelid should be given for composite types */
3129 Assert(OidIsValid(typentry->typrelid));
3130
3131 /*
3132 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3133 * information indicating entry should be still there.
3134 */
3135 if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3136 !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3137 typentry->tupDesc == NULL)
3138 {
3139 bool found;
3140
3142 &typentry->typrelid,
3143 HASH_REMOVE, &found);
3144 Assert(found || is_in_progress);
3145 }
3146 else
3147 {
3148#ifdef USE_ASSERT_CHECKING
3149 /*
3150 * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
3151 * entry if it should exist.
3152 */
3153 bool found;
3154
3155 if (!is_in_progress)
3156 {
3158 &typentry->typrelid,
3159 HASH_FIND, &found);
3160 Assert(found);
3161 }
3162#endif
3163 }
3164}
3165
3166/*
3167 * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
3168 * entries, marked as in-progress by lookup_type_cache(). It may happen
3169 * in case of an error or interruption during the lookup_type_cache() call.
3170 */
/*
 * Repair RelIdToTypeId entries for typcache entries left marked in-progress
 * by an interrupted or failed lookup_type_cache() call (per the comment
 * above): look each one up in TypeCacheHash and fix it up, then reset the
 * in-progress list.
 *
 * NOTE(review): partial extraction — the function-name line, the
 * hash_search() call head, the per-entry fix-up call, and the final list
 * reset statement are missing.  Code is left byte-identical; confirm against
 * the canonical source before editing.
 */
3171static void
3173{
3174 int i;
3175
3176 for (i = 0; i < in_progress_list_len; i++)
3177 {
3178 TypeCacheEntry *typentry;
3179
3182 HASH_FIND, NULL);
3183 if (typentry)
3185 }
3186
3188}
3189
/*
 * NOTE(review): only the shell of this function survived extraction (its
 * name line and body statement are missing).  Given its position after
 * finalize_in_progress_typentries(), it is presumably the end-of-transaction
 * typcache hook — confirm against the canonical source.
 */
3190void
3192{
3194}
3195
/*
 * NOTE(review): only the shell of this function survived extraction (its
 * name line and body statement are missing).  Presumably the
 * end-of-subtransaction typcache hook, mirroring the function above —
 * confirm against the canonical source.
 */
3196void
3198{
3200}
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:219
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:364
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
Bitmapset * bms_copy(const Bitmapset *a)
Definition: bitmapset.c:122
#define TextDatumGetCString(d)
Definition: builtins.h:98
#define NameStr(name)
Definition: c.h:752
#define RegProcedureIsValid(p)
Definition: c.h:777
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:471
int32_t int32
Definition: c.h:535
uint64_t uint64
Definition: c.h:540
uint32_t uint32
Definition: c.h:539
float float4
Definition: c.h:635
#define MemSet(start, val, len)
Definition: c.h:1020
#define OidIsValid(objectId)
Definition: c.h:775
size_t Size
Definition: c.h:611
void CreateCacheMemoryContext(void)
Definition: catcache.c:709
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:957
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:841
uint64 dsa_pointer
Definition: dsa.h:62
#define dsa_allocate(area, size)
Definition: dsa.h:109
bool dshash_delete_key(dshash_table *hash_table, const void *key)
Definition: dshash.c:503
void dshash_memcpy(void *dest, const void *src, size_t size, void *arg)
Definition: dshash.c:590
void dshash_release_lock(dshash_table *hash_table, void *entry)
Definition: dshash.c:558
void dshash_detach(dshash_table *hash_table)
Definition: dshash.c:307
void * dshash_find(dshash_table *hash_table, const void *key, bool exclusive)
Definition: dshash.c:390
dshash_table_handle dshash_get_hash_table_handle(dshash_table *hash_table)
Definition: dshash.c:367
dshash_table * dshash_attach(dsa_area *area, const dshash_parameters *params, dshash_table_handle handle, void *arg)
Definition: dshash.c:270
void * dshash_find_or_insert(dshash_table *hash_table, const void *key, bool *found)
Definition: dshash.c:433
dshash_hash dshash_memhash(const void *v, size_t size, void *arg)
Definition: dshash.c:581
dshash_table * dshash_create(dsa_area *area, const dshash_parameters *params, void *arg)
Definition: dshash.c:206
int dshash_memcmp(const void *a, const void *b, size_t size, void *arg)
Definition: dshash.c:572
dsa_pointer dshash_table_handle
Definition: dshash.h:24
void on_dsm_detach(dsm_segment *seg, on_dsm_detach_callback function, Datum arg)
Definition: dsm.c:1132
void hash_seq_init_with_hash_value(HASH_SEQ_STATUS *status, HTAB *hashp, uint32 hashvalue)
Definition: dynahash.c:1400
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:952
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:358
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1415
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:908
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1380
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define PG_RE_THROW()
Definition: elog.h:405
#define PG_TRY(...)
Definition: elog.h:372
#define PG_END_TRY(...)
Definition: elog.h:397
#define ERROR
Definition: elog.h:39
#define PG_CATCH(...)
Definition: elog.h:382
#define elog(elevel,...)
Definition: elog.h:226
#define ereport(elevel,...)
Definition: elog.h:150
ExprState * ExecInitExpr(Expr *node, PlanState *parent)
Definition: execExpr.c:143
@ DOM_CONSTRAINT_CHECK
Definition: execnodes.h:1044
@ DOM_CONSTRAINT_NOTNULL
Definition: execnodes.h:1043
void fmgr_info_cxt(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt)
Definition: fmgr.c:137
char * format_type_be(Oid type_oid)
Definition: format_type.c:343
void systable_endscan(SysScanDesc sysscan)
Definition: genam.c:603
HeapTuple systable_getnext(SysScanDesc sysscan)
Definition: genam.c:514
SysScanDesc systable_beginscan(Relation heapRelation, Oid indexId, bool indexOK, Snapshot snapshot, int nkeys, ScanKey key)
Definition: genam.c:388
#define HASHSTANDARD_PROC
Definition: hash.h:355
#define HASHEXTENDED_PROC
Definition: hash.h:356
Assert(PointerIsAligned(start, uint64))
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_COMPARE
Definition: hsearch.h:99
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
static void * GETSTRUCT(const HeapTupleData *tuple)
Definition: htup_details.h:728
static Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: htup_details.h:861
#define IsParallelWorker()
Definition: parallel.h:60
Oid GetDefaultOpClass(Oid type_id, Oid am_id)
Definition: indexcmds.c:2344
long val
Definition: informix.c:689
#define INJECTION_POINT(name, arg)
static int pg_cmp_u32(uint32 a, uint32 b)
Definition: int.h:652
void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func, Datum arg)
Definition: inval.c:1854
void CacheRegisterSyscacheCallback(int cacheid, SyscacheCallbackFunction func, Datum arg)
Definition: inval.c:1812
int b
Definition: isn.c:74
int a
Definition: isn.c:73
int i
Definition: isn.c:77
List * lappend(List *list, void *datum)
Definition: list.c:339
List * lcons(void *datum, List *list)
Definition: list.c:495
#define AccessShareLock
Definition: lockdefs.h:36
Oid get_opclass_input_type(Oid opclass)
Definition: lsyscache.c:1331
Oid get_opclass_family(Oid opclass)
Definition: lsyscache.c:1309
Oid get_multirange_range(Oid multirangeOid)
Definition: lsyscache.c:3650
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:889
RegProcedure get_opcode(Oid opno)
Definition: lsyscache.c:1452
Oid get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, int16 strategy)
Definition: lsyscache.c:168
Oid get_base_element_type(Oid typid)
Definition: lsyscache.c:2999
Oid getBaseTypeAndTypmod(Oid typid, int32 *typmod)
Definition: lsyscache.c:2705
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1229
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1263
char * pstrdup(const char *in)
Definition: mcxt.c:1759
void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb)
Definition: mcxt.c:579
void MemoryContextSetParent(MemoryContext context, MemoryContext new_parent)
Definition: mcxt.c:683
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1610
void pfree(void *pointer)
Definition: mcxt.c:1594
MemoryContext TopMemoryContext
Definition: mcxt.c:166
void * palloc(Size size)
Definition: mcxt.c:1365
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
MemoryContext CacheMemoryContext
Definition: mcxt.c:169
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:469
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_SMALL_SIZES
Definition: memutils.h:170
#define BTORDER_PROC
Definition: nbtree.h:716
#define copyObject(obj)
Definition: nodes.h:232
#define makeNode(_type_)
Definition: nodes.h:161
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
FormData_pg_attribute * Form_pg_attribute
Definition: pg_attribute.h:202
void * arg
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189
FormData_pg_constraint * Form_pg_constraint
const void * data
FormData_pg_enum * Form_pg_enum
Definition: pg_enum.h:44
#define lfirst(lc)
Definition: pg_list.h:172
#define NIL
Definition: pg_list.h:68
FormData_pg_range * Form_pg_range
Definition: pg_range.h:58
FormData_pg_type * Form_pg_type
Definition: pg_type.h:261
Expr * expression_planner(Expr *expr)
Definition: planner.c:6719
#define qsort(a, b, c, d)
Definition: port.h:479
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:332
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:262
uint64_t Datum
Definition: postgres.h:70
#define InvalidOid
Definition: postgres_ext.h:37
unsigned int Oid
Definition: postgres_ext.h:32
char * c
tree ctl
Definition: radixtree.h:1838
void * stringToNode(const char *str)
Definition: read.c:90
#define RelationGetDescr(relation)
Definition: rel.h:540
void ScanKeyInit(ScanKey entry, AttrNumber attributeNumber, StrategyNumber strategy, RegProcedure procedure, Datum argument)
Definition: scankey.c:76
Session * CurrentSession
Definition: session.c:48
void relation_close(Relation relation, LOCKMODE lockmode)
Definition: relation.c:205
Relation relation_open(Oid relationId, LOCKMODE lockmode)
Definition: relation.c:47
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define HTEqualStrategyNumber
Definition: stratnum.h:41
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
MemoryContext dccContext
Definition: typcache.c:142
DomainConstraintCache * dcc
Definition: typcache.h:173
MemoryContext refctx
Definition: typcache.h:168
MemoryContextCallback callback
Definition: typcache.h:174
TypeCacheEntry * tcache
Definition: typcache.h:169
DomainConstraintType constrainttype
Definition: execnodes.h:1050
ExprState * check_exprstate
Definition: execnodes.h:1053
float4 sort_order
Definition: typcache.c:150
Oid enum_oid
Definition: typcache.c:149
Oid fn_oid
Definition: fmgr.h:59
Definition: dynahash.c:222
Definition: pg_list.h:54
MemoryContextCallbackFunction func
Definition: palloc.h:49
TupleDesc tupdesc
Definition: typcache.c:174
TupleDesc rd_att
Definition: rel.h:112
Form_pg_class rd_rel
Definition: rel.h:111
dsm_segment * segment
Definition: session.h:27
dshash_table * shared_record_table
Definition: session.h:32
struct SharedRecordTypmodRegistry * shared_typmod_registry
Definition: session.h:31
dsa_area * area
Definition: session.h:28
dshash_table * shared_typmod_table
Definition: session.h:33
SharedRecordTableKey key
Definition: typcache.c:213
TupleDesc local_tupdesc
Definition: typcache.c:201
union SharedRecordTableKey::@32 u
dsa_pointer shared_tupdesc
Definition: typcache.c:202
dshash_table_handle typmod_table_handle
Definition: typcache.c:186
pg_atomic_uint32 next_typmod
Definition: typcache.c:188
dshash_table_handle record_table_handle
Definition: typcache.c:184
dsa_pointer shared_tupdesc
Definition: typcache.c:223
int tdrefcount
Definition: tupdesc.h:140
int32 tdtypmod
Definition: tupdesc.h:139
Oid tdtypeid
Definition: tupdesc.h:138
uint32 type_id_hash
Definition: typcache.h:36
uint64 tupDesc_identifier
Definition: typcache.h:91
FmgrInfo hash_proc_finfo
Definition: typcache.h:78
int32 domainBaseTypmod
Definition: typcache.h:116
Oid hash_extended_proc
Definition: typcache.h:67
Oid typsubscript
Definition: typcache.h:45
FmgrInfo rng_cmp_proc_finfo
Definition: typcache.h:102
FmgrInfo cmp_proc_finfo
Definition: typcache.h:77
Oid rng_collation
Definition: typcache.h:101
char typalign
Definition: typcache.h:41
struct TypeCacheEntry * rngelemtype
Definition: typcache.h:99
char typtype
Definition: typcache.h:43
TupleDesc tupDesc
Definition: typcache.h:90
FmgrInfo hash_extended_proc_finfo
Definition: typcache.h:79
DomainConstraintCache * domainData
Definition: typcache.h:122
struct TypeCacheEntry * rngtype
Definition: typcache.h:109
FmgrInfo rng_subdiff_finfo
Definition: typcache.h:104
FmgrInfo eq_opr_finfo
Definition: typcache.h:76
Oid btree_opintype
Definition: typcache.h:59
struct TypeCacheEnumData * enumData
Definition: typcache.h:131
struct TypeCacheEntry * nextDomain
Definition: typcache.h:134
bool typbyval
Definition: typcache.h:40
FmgrInfo rng_canonical_finfo
Definition: typcache.h:103
int16 typlen
Definition: typcache.h:39
Oid hash_opintype
Definition: typcache.h:61
Oid typcollation
Definition: typcache.h:48
Oid domainBaseType
Definition: typcache.h:115
char typstorage
Definition: typcache.h:42
Oid rng_opfamily
Definition: typcache.h:100
Bitmapset * sorted_values
Definition: typcache.c:156
EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER]
Definition: typcache.c:158
Definition: dsa.c:348
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:264
HeapTuple SearchSysCache1(int cacheId, Datum key1)
Definition: syscache.c:220
#define GetSysCacheHashValue1(cacheId, key1)
Definition: syscache.h:118
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
static ItemArray items
Definition: test_tidstore.c:48
TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc)
Definition: tupdesc.c:340
void TupleDescCopy(TupleDesc dst, TupleDesc src)
Definition: tupdesc.c:428
void DecrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:577
void FreeTupleDesc(TupleDesc tupdesc)
Definition: tupdesc.c:502
void IncrTupleDescRefCount(TupleDesc tupdesc)
Definition: tupdesc.c:559
uint32 hashRowType(TupleDesc desc)
Definition: tupdesc.c:813
TupleDesc CreateTupleDescCopy(TupleDesc tupdesc)
Definition: tupdesc.c:252
bool equalRowTypes(TupleDesc tupdesc1, TupleDesc tupdesc2)
Definition: tupdesc.c:777
#define TupleDescSize(src)
Definition: tupdesc.h:198
#define PinTupleDesc(tupdesc)
Definition: tupdesc.h:213
static FormData_pg_attribute * TupleDescAttr(TupleDesc tupdesc, int i)
Definition: tupdesc.h:160
struct TupleDescData * TupleDesc
Definition: tupdesc.h:145
#define TCFLAGS_CHECKED_BTREE_OPCLASS
Definition: typcache.c:100
#define TCFLAGS_CHECKED_HASH_OPCLASS
Definition: typcache.c:101
static bool range_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1714
static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition: typcache.c:3074
void InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref, MemoryContext refctx, bool need_exprstate)
Definition: typcache.c:1401
static TupleDesc lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1827
TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
Definition: typcache.c:1921
void SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
Definition: typcache.c:2295
#define TCFLAGS_OPERATOR_FLAGS
Definition: typcache.c:122
#define TCFLAGS_CHECKED_FIELD_PROPERTIES
Definition: typcache.c:113
static void cache_range_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1730
#define TCFLAGS_HAVE_FIELD_COMPARE
Definition: typcache.c:115
void AtEOXact_TypeCache(void)
Definition: typcache.c:3191
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE
Definition: typcache.c:119
static void load_enum_cache_data(TypeCacheEntry *tcache)
Definition: typcache.c:2736
static bool record_fields_have_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1593
static HTAB * RelIdToTypeIdCacheHash
Definition: typcache.c:87
static EnumItem * find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2891
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1601
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
Definition: typcache.c:2942
static int in_progress_list_maxlen
Definition: typcache.c:228
static int32 NextRecordTypmod
Definition: typcache.c:306
TupleDesc lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1977
static Oid * in_progress_list
Definition: typcache.c:226
static const dshash_parameters srtr_typmod_table_params
Definition: typcache.c:285
static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
Definition: typcache.c:3108
#define TCFLAGS_CHECKED_GT_OPR
Definition: typcache.c:104
static bool multirange_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1754
static List * prep_domain_constraints(List *constraints, MemoryContext execctx)
Definition: typcache.c:1363
TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
Definition: typcache.c:1938
static bool record_fields_have_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1577
#define TCFLAGS_CHECKED_LT_OPR
Definition: typcache.c:103
#define TCFLAGS_CHECKED_HASH_PROC
Definition: typcache.c:106
static void dccref_deletion_callback(void *arg)
Definition: typcache.c:1342
#define TCFLAGS_HAVE_FIELD_EQUALITY
Definition: typcache.c:114
static void InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
Definition: typcache.c:2364
struct SharedRecordTableEntry SharedRecordTableEntry
void SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry, dsm_segment *segment, dsa_area *area)
Definition: typcache.c:2196
static int dcs_cmp(const void *a, const void *b)
Definition: typcache.c:1318
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1539
static int shared_record_table_compare(const void *a, const void *b, size_t size, void *arg)
Definition: typcache.c:234
static bool array_element_has_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1531
static void load_multirangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1061
static uint32 type_cache_syshash(const void *key, Size keysize)
Definition: typcache.c:359
#define TCFLAGS_CHECKED_CMP_PROC
Definition: typcache.c:105
struct SharedTypmodTableEntry SharedTypmodTableEntry
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING
Definition: typcache.c:112
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1762
static int in_progress_list_len
Definition: typcache.c:227
static bool array_element_has_equality(TypeCacheEntry *typentry)
Definition: typcache.c:1515
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
Definition: typcache.c:2921
static void load_rangetype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1003
uint64 assign_record_type_identifier(Oid type_id, int32 typmod)
Definition: typcache.c:2133
static RecordCacheArrayEntry * RecordCacheArray
Definition: typcache.c:304
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry)
Definition: typcache.c:1722
static HTAB * RecordCacheHash
Definition: typcache.c:295
static bool enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
Definition: typcache.c:2634
static TypeCacheEntry * firstDomainTypeEntry
Definition: typcache.c:96
struct RelIdToTypeIdCacheEntry RelIdToTypeIdCacheEntry
struct RecordCacheEntry RecordCacheEntry
void AtEOSubXact_TypeCache(void)
Definition: typcache.c:3197
static void shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
Definition: typcache.c:3054
#define TCFLAGS_HAVE_ELEM_HASHING
Definition: typcache.c:111
struct RecordCacheArrayEntry RecordCacheArrayEntry
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC
Definition: typcache.c:107
static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2515
struct TypeCacheEnumData TypeCacheEnumData
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2610
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
Definition: typcache.c:2572
static void load_domaintype_info(TypeCacheEntry *typentry)
Definition: typcache.c:1083
bool DomainHasConstraints(Oid type_id)
Definition: typcache.c:1488
#define TCFLAGS_HAVE_ELEM_COMPARE
Definition: typcache.c:110
static void TypeCacheRelCallback(Datum arg, Oid relid)
Definition: typcache.c:2419
static void cache_array_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1547
size_t SharedRecordTypmodRegistryEstimate(void)
Definition: typcache.c:2174
static void cache_multirange_element_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1770
#define TCFLAGS_CHECKED_ELEM_PROPERTIES
Definition: typcache.c:108
#define TCFLAGS_HAVE_ELEM_EQUALITY
Definition: typcache.c:109
static bool array_element_has_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1523
#define TCFLAGS_HAVE_PG_TYPE_DATA
Definition: typcache.c:99
static uint32 shared_record_table_hash(const void *a, size_t size, void *arg)
Definition: typcache.c:260
int compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
Definition: typcache.c:2663
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS
Definition: typcache.c:118
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING
Definition: typcache.c:117
struct SharedRecordTableKey SharedRecordTableKey
static int32 RecordCacheArrayLen
Definition: typcache.c:305
void assign_record_type_typmod(TupleDesc tupDesc)
Definition: typcache.c:2041
static HTAB * TypeCacheHash
Definition: typcache.c:79
static uint64 tupledesc_id_counter
Definition: typcache.c:313
static bool record_fields_have_compare(TypeCacheEntry *typentry)
Definition: typcache.c:1585
#define TCFLAGS_HAVE_FIELD_HASHING
Definition: typcache.c:116
static int record_type_typmod_compare(const void *a, const void *b, size_t size)
Definition: typcache.c:2025
static const dshash_parameters srtr_record_table_params
Definition: typcache.c:275
TupleDesc lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
Definition: typcache.c:1955
static int enum_oid_cmp(const void *left, const void *right)
Definition: typcache.c:2908
static void finalize_in_progress_typentries(void)
Definition: typcache.c:3172
static void decr_dcc_refcount(DomainConstraintCache *dcc)
Definition: typcache.c:1331
#define TCFLAGS_CHECKED_EQ_OPR
Definition: typcache.c:102
void UpdateDomainConstraintRef(DomainConstraintRef *ref)
Definition: typcache.c:1439
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:386
static void ensure_record_cache_typmod_slot_exists(int32 typmod)
Definition: typcache.c:1798
static void cache_record_field_properties(TypeCacheEntry *typentry)
Definition: typcache.c:1609
static uint32 record_type_typmod_hash(const void *data, size_t size)
Definition: typcache.c:2014
static void load_typcache_tupdesc(TypeCacheEntry *typentry)
Definition: typcache.c:969
#define INVALID_TUPLEDESC_IDENTIFIER
Definition: typcache.h:157
#define TYPECACHE_HASH_PROC_FINFO
Definition: typcache.h:145
#define TYPECACHE_EQ_OPR
Definition: typcache.h:138
#define TYPECACHE_HASH_OPFAMILY
Definition: typcache.h:148
#define TYPECACHE_TUPDESC
Definition: typcache.h:146
#define TYPECACHE_MULTIRANGE_INFO
Definition: typcache.h:154
struct SharedRecordTypmodRegistry SharedRecordTypmodRegistry
Definition: typcache.h:177
#define TYPECACHE_EQ_OPR_FINFO
Definition: typcache.h:143
#define TYPECACHE_HASH_EXTENDED_PROC
Definition: typcache.h:152
#define TYPECACHE_BTREE_OPFAMILY
Definition: typcache.h:147
#define TYPECACHE_DOMAIN_BASE_INFO
Definition: typcache.h:150
#define TYPECACHE_DOMAIN_CONSTR_INFO
Definition: typcache.h:151
#define TYPECACHE_RANGE_INFO
Definition: typcache.h:149
#define TYPECACHE_GT_OPR
Definition: typcache.h:140
#define TYPECACHE_CMP_PROC
Definition: typcache.h:141
struct TypeCacheEntry TypeCacheEntry
#define TYPECACHE_LT_OPR
Definition: typcache.h:139
#define TYPECACHE_HASH_EXTENDED_PROC_FINFO
Definition: typcache.h:153
#define TYPECACHE_CMP_PROC_FINFO
Definition: typcache.h:144
#define TYPECACHE_HASH_PROC
Definition: typcache.h:142