PostgreSQL Source Code git master
tuplesortvariants.c
1/*-------------------------------------------------------------------------
2 *
3 * tuplesortvariants.c
4 * Implementation of tuple sorting variants.
5 *
6 * This module handles the sorting of heap tuples, index tuples, or single
7 * Datums. The implementation is based on the generalized tuple sorting
8 * facility given in tuplesort.c. Support for other kinds of sortable objects
9 * could easily be added here, in another module, or even in an extension.
10 *
11 *
12 * Copyright (c) 2022-2025, PostgreSQL Global Development Group
13 *
14 * IDENTIFICATION
15 * src/backend/utils/sort/tuplesortvariants.c
16 *
17 *-------------------------------------------------------------------------
18 */
19
20#include "postgres.h"
21
22#include "access/brin_tuple.h"
23#include "access/gin_tuple.h"
24#include "access/hash.h"
25#include "access/htup_details.h"
26#include "access/nbtree.h"
27#include "catalog/index.h"
29#include "executor/executor.h"
30#include "pg_trace.h"
31#include "utils/datum.h"
32#include "utils/guc.h"
33#include "utils/lsyscache.h"
34#include "utils/rel.h"
35#include "utils/tuplesort.h"
36
37
38/* sort-type codes for sort__start probes */
39#define HEAP_SORT 0
40#define INDEX_SORT 1
41#define DATUM_SORT 2
42#define CLUSTER_SORT 3
43
44static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups,
45 int count);
46static void removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups,
47 int count);
48static void removeabbrev_index(Tuplesortstate *state, SortTuple *stups,
49 int count);
50static void removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups,
51 int count);
52static void removeabbrev_index_gin(Tuplesortstate *state, SortTuple *stups,
53 int count);
54static void removeabbrev_datum(Tuplesortstate *state, SortTuple *stups,
55 int count);
56static int comparetup_heap(const SortTuple *a, const SortTuple *b,
58static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b,
61 SortTuple *stup);
62static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
63 LogicalTape *tape, unsigned int len);
64static int comparetup_cluster(const SortTuple *a, const SortTuple *b,
66static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b,
69 SortTuple *stup);
71 LogicalTape *tape, unsigned int tuplen);
72static int comparetup_index_btree(const SortTuple *a, const SortTuple *b,
74static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b,
76static int comparetup_index_hash(const SortTuple *a, const SortTuple *b,
78static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b,
80static int comparetup_index_brin(const SortTuple *a, const SortTuple *b,
82static int comparetup_index_gin(const SortTuple *a, const SortTuple *b,
85 SortTuple *stup);
87 LogicalTape *tape, unsigned int len);
89 SortTuple *stup);
91 LogicalTape *tape, unsigned int len);
93 SortTuple *stup);
95 LogicalTape *tape, unsigned int len);
96static int comparetup_datum(const SortTuple *a, const SortTuple *b,
98static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b,
101 SortTuple *stup);
102static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
103 LogicalTape *tape, unsigned int len);
105
106/*
107 * Data structure pointed to by "TuplesortPublic.arg" for the CLUSTER case.
108 * Set by tuplesort_begin_cluster.
109 */
110typedef struct
111{
113
114 IndexInfo *indexInfo; /* info about index being used for reference */
115 EState *estate; /* for evaluating index expressions */
117
118/*
119 * Data structure pointed to by "TuplesortPublic.arg" for the IndexTuple case.
120 * Set by tuplesort_begin_index_xxx and used only by the IndexTuple routines.
121 */
122typedef struct
123{
124 Relation heapRel; /* table the index is being built on */
125 Relation indexRel; /* index being built */
127
128/*
129 * Data structure pointed to by "TuplesortPublic.arg" for the index_btree subcase.
130 */
131typedef struct
132{
134
135 bool enforceUnique; /* complain if we find duplicate tuples */
136 bool uniqueNullsNotDistinct; /* unique constraint null treatment */
138
139/*
140 * Data structure pointed to by "TuplesortPublic.arg" for the index_hash subcase.
141 */
142typedef struct
143{
145
146 uint32 high_mask; /* masks for sortable part of hash code */
150
151/*
152 * Data structure pointed to by "TuplesortPublic.arg" for the Datum case.
153 * Set by tuplesort_begin_datum and used only by the DatumTuple routines.
154 */
155typedef struct
156{
157 /* the datatype oid of Datum's to be sorted */
159 /* we need typelen in order to know how to copy the Datums. */
162
163/*
164 * Computing the size of a BrinTuple from the tuple alone is difficult, so we
165 * track the length alongside the tuple referenced by the SortTuple. That's
166 * what BrinSortTuple is for - it's essentially a BrinTuple prefixed by its length.
167 */
168typedef struct BrinSortTuple
169{
173
174/* Size of the BrinSortTuple, given length of the BrinTuple. */
175#define BRINSORTTUPLE_SIZE(len) (offsetof(BrinSortTuple, tuple) + (len))
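
/*
 * For example (illustrative only), copying an incoming BrinTuple "tuple" of
 * length "size" into a BrinSortTuple works like this, mirroring what
 * tuplesort_putbrintuple() below actually does:
 *
 *		BrinSortTuple *bstup = palloc(BRINSORTTUPLE_SIZE(size));
 *
 *		bstup->tuplen = size;
 *		memcpy(&bstup->tuple, tuple, size);
 */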
176
177
180 int nkeys, AttrNumber *attNums,
181 Oid *sortOperators, Oid *sortCollations,
182 bool *nullsFirstFlags,
183 int workMem, SortCoordinate coordinate, int sortopt)
184{
185 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
186 sortopt);
188 MemoryContext oldcontext;
189 int i;
190
191 oldcontext = MemoryContextSwitchTo(base->maincontext);
192
193 Assert(nkeys > 0);
194
195 if (trace_sort)
196 elog(LOG,
197 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
198 nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
199
200 base->nKeys = nkeys;
201
202 TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
203 false, /* no unique check */
204 nkeys,
205 workMem,
206 sortopt & TUPLESORT_RANDOMACCESS,
207 PARALLEL_SORT(coordinate));
208
212 base->writetup = writetup_heap;
213 base->readtup = readtup_heap;
214 base->haveDatum1 = true;
215 base->arg = tupDesc; /* assume we need not copy tupDesc */
216
217 /* Prepare SortSupport data for each column */
218 base->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
219
220 for (i = 0; i < nkeys; i++)
221 {
222 SortSupport sortKey = base->sortKeys + i;
223
224 Assert(attNums[i] != 0);
225 Assert(sortOperators[i] != 0);
226
228 sortKey->ssup_collation = sortCollations[i];
229 sortKey->ssup_nulls_first = nullsFirstFlags[i];
230 sortKey->ssup_attno = attNums[i];
231 /* Convey if abbreviation optimization is applicable in principle */
232 sortKey->abbreviate = (i == 0 && base->haveDatum1);
233
234 PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
235 }
236
237 /*
238 * The "onlyKey" optimization cannot be used with abbreviated keys, since
239 * tie-breaker comparisons may be required. Typically, the optimization
240 * is only of value to pass-by-value types anyway, whereas abbreviated
241 * keys are typically only of value to pass-by-reference types.
242 */
243 if (nkeys == 1 && !base->sortKeys->abbrev_converter)
244 base->onlyKey = base->sortKeys;
245
246 MemoryContextSwitchTo(oldcontext);
247
248 return state;
249}
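
/*
 * Illustrative usage sketch (not part of this file, and not compiled): how a
 * caller might drive a heap-tuple sort created by tuplesort_begin_heap().
 * The single sort key, the caller-supplied ordering operator, and the slot
 * handling are assumptions made for the example, not requirements of the API.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLES
static void
example_heap_sort(TupleDesc tupDesc, Oid sortOperator,
				  TupleTableSlot **slots, int nslots, int workMem)
{
	AttrNumber	attNums[1] = {1};	/* sort on the first column */
	Oid			sortOperators[1] = {sortOperator};	/* "<" operator for the type */
	Oid			sortCollations[1] = {InvalidOid};
	bool		nullsFirstFlags[1] = {false};
	Tuplesortstate *sortstate;
	TupleTableSlot *outslot;

	sortstate = tuplesort_begin_heap(tupDesc, 1, attNums,
									 sortOperators, sortCollations,
									 nullsFirstFlags,
									 workMem, NULL, TUPLESORT_NONE);

	/* feed the input; tuplesort_puttupleslot() copies each tuple */
	for (int i = 0; i < nslots; i++)
		tuplesort_puttupleslot(sortstate, slots[i]);

	tuplesort_performsort(sortstate);

	/* fetch back in sorted order; copy=false returns tuplesort-owned memory */
	outslot = MakeSingleTupleTableSlot(tupDesc, &TTSOpsMinimalTuple);
	while (tuplesort_gettupleslot(sortstate, true, false, outslot, NULL))
	{
		/* ... consume outslot before fetching the next tuple ... */
	}

	ExecDropSingleTupleTableSlot(outslot);
	tuplesort_end(sortstate);
}
#endif							/* TUPLESORT_VARIANTS_EXAMPLES */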
250
253 Relation indexRel,
254 int workMem,
255 SortCoordinate coordinate, int sortopt)
256{
257 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
258 sortopt);
260 BTScanInsert indexScanKey;
261 MemoryContext oldcontext;
263 int i;
264
265 Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
266
267 oldcontext = MemoryContextSwitchTo(base->maincontext);
269
270 if (trace_sort)
271 elog(LOG,
272 "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
274 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
275
277
278 TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
279 false, /* no unique check */
280 base->nKeys,
281 workMem,
282 sortopt & TUPLESORT_RANDOMACCESS,
283 PARALLEL_SORT(coordinate));
284
289 base->readtup = readtup_cluster;
291 base->arg = arg;
292
293 arg->indexInfo = BuildIndexInfo(indexRel);
294
295 /*
296 * If we don't have a simple leading attribute, we don't currently
297 * initialize datum1, so disable optimizations that require it.
298 */
299 if (arg->indexInfo->ii_IndexAttrNumbers[0] == 0)
300 base->haveDatum1 = false;
301 else
302 base->haveDatum1 = true;
303
304 arg->tupDesc = tupDesc; /* assume we need not copy tupDesc */
305
306 indexScanKey = _bt_mkscankey(indexRel, NULL);
307
308 if (arg->indexInfo->ii_Expressions != NULL)
309 {
310 TupleTableSlot *slot;
311 ExprContext *econtext;
312
313 /*
314 * We will need to use FormIndexDatum to evaluate the index
315 * expressions. To do that, we need an EState, as well as a
316 * TupleTableSlot to put the table tuples into. The econtext's
317 * scantuple has to point to that slot, too.
318 */
319 arg->estate = CreateExecutorState();
321 econtext = GetPerTupleExprContext(arg->estate);
322 econtext->ecxt_scantuple = slot;
323 }
324
325 /* Prepare SortSupport data for each column */
326 base->sortKeys = (SortSupport) palloc0(base->nKeys *
327 sizeof(SortSupportData));
328
329 for (i = 0; i < base->nKeys; i++)
330 {
331 SortSupport sortKey = base->sortKeys + i;
332 ScanKey scanKey = indexScanKey->scankeys + i;
333 bool reverse;
334
336 sortKey->ssup_collation = scanKey->sk_collation;
337 sortKey->ssup_nulls_first =
338 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
339 sortKey->ssup_attno = scanKey->sk_attno;
340 /* Convey if abbreviation optimization is applicable in principle */
341 sortKey->abbreviate = (i == 0 && base->haveDatum1);
342
343 Assert(sortKey->ssup_attno != 0);
344
345 reverse = (scanKey->sk_flags & SK_BT_DESC) != 0;
346
347 PrepareSortSupportFromIndexRel(indexRel, reverse, sortKey);
348 }
349
350 pfree(indexScanKey);
351
352 MemoryContextSwitchTo(oldcontext);
353
354 return state;
355}
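
/*
 * Illustrative usage sketch (not compiled): feeding heap tuples into a
 * CLUSTER-style sort and reading them back in index order.  The relations,
 * the tuple source, and the consumer are assumptions made for the example;
 * the index must be a btree, per the Assert above.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLES
static void
example_cluster_sort(Relation heapRel, Relation btreeIndexRel,
					 HeapTuple *tuples, int ntuples, int workMem)
{
	Tuplesortstate *sortstate;
	HeapTuple	sorted;

	sortstate = tuplesort_begin_cluster(RelationGetDescr(heapRel),
										btreeIndexRel,
										workMem, NULL, TUPLESORT_NONE);

	for (int i = 0; i < ntuples; i++)
		tuplesort_putheaptuple(sortstate, tuples[i]);

	tuplesort_performsort(sortstate);

	/* returned tuples belong to the tuplesort and must not be freed */
	while ((sorted = tuplesort_getheaptuple(sortstate, true)) != NULL)
	{
		/* ... e.g. insert "sorted" into the rewritten table ... */
	}

	tuplesort_end(sortstate);
}
#endif							/* TUPLESORT_VARIANTS_EXAMPLES */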
356
359 Relation indexRel,
360 bool enforceUnique,
361 bool uniqueNullsNotDistinct,
362 int workMem,
363 SortCoordinate coordinate,
364 int sortopt)
365{
366 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
367 sortopt);
369 BTScanInsert indexScanKey;
371 MemoryContext oldcontext;
372 int i;
373
374 oldcontext = MemoryContextSwitchTo(base->maincontext);
376
377 if (trace_sort)
378 elog(LOG,
379 "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
380 enforceUnique ? 't' : 'f',
381 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
382
384
385 TRACE_POSTGRESQL_SORT_START(INDEX_SORT,
386 enforceUnique,
387 base->nKeys,
388 workMem,
389 sortopt & TUPLESORT_RANDOMACCESS,
390 PARALLEL_SORT(coordinate));
391
395 base->writetup = writetup_index;
396 base->readtup = readtup_index;
397 base->haveDatum1 = true;
398 base->arg = arg;
399
400 arg->index.heapRel = heapRel;
401 arg->index.indexRel = indexRel;
402 arg->enforceUnique = enforceUnique;
403 arg->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
404
405 indexScanKey = _bt_mkscankey(indexRel, NULL);
406
407 /* Prepare SortSupport data for each column */
408 base->sortKeys = (SortSupport) palloc0(base->nKeys *
409 sizeof(SortSupportData));
410
411 for (i = 0; i < base->nKeys; i++)
412 {
413 SortSupport sortKey = base->sortKeys + i;
414 ScanKey scanKey = indexScanKey->scankeys + i;
415 bool reverse;
416
418 sortKey->ssup_collation = scanKey->sk_collation;
419 sortKey->ssup_nulls_first =
420 (scanKey->sk_flags & SK_BT_NULLS_FIRST) != 0;
421 sortKey->ssup_attno = scanKey->sk_attno;
422 /* Convey if abbreviation optimization is applicable in principle */
423 sortKey->abbreviate = (i == 0 && base->haveDatum1);
424
425 Assert(sortKey->ssup_attno != 0);
426
427 reverse = (scanKey->sk_flags & SK_BT_DESC) != 0;
428
429 PrepareSortSupportFromIndexRel(indexRel, reverse, sortKey);
430 }
431
432 pfree(indexScanKey);
433
434 MemoryContextSwitchTo(oldcontext);
435
436 return state;
437}
438
441 Relation indexRel,
442 uint32 high_mask,
443 uint32 low_mask,
444 uint32 max_buckets,
445 int workMem,
446 SortCoordinate coordinate,
447 int sortopt)
448{
449 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
450 sortopt);
452 MemoryContext oldcontext;
454
455 oldcontext = MemoryContextSwitchTo(base->maincontext);
457
458 if (trace_sort)
459 elog(LOG,
460 "begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
461 "max_buckets = 0x%x, workMem = %d, randomAccess = %c",
462 high_mask,
463 low_mask,
464 max_buckets,
465 workMem,
466 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
467
468 base->nKeys = 1; /* Only one sort column, the hash code */
469
473 base->writetup = writetup_index;
474 base->readtup = readtup_index;
475 base->haveDatum1 = true;
476 base->arg = arg;
477
478 arg->index.heapRel = heapRel;
479 arg->index.indexRel = indexRel;
480
481 arg->high_mask = high_mask;
482 arg->low_mask = low_mask;
483 arg->max_buckets = max_buckets;
484
485 MemoryContextSwitchTo(oldcontext);
486
487 return state;
488}
489
492 Relation indexRel,
493 int workMem,
494 SortCoordinate coordinate,
495 int sortopt)
496{
497 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
498 sortopt);
500 MemoryContext oldcontext;
502 int i;
503
504 oldcontext = MemoryContextSwitchTo(base->maincontext);
506
507 if (trace_sort)
508 elog(LOG,
509 "begin index sort: workMem = %d, randomAccess = %c",
510 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
511
513
517 base->writetup = writetup_index;
518 base->readtup = readtup_index;
519 base->haveDatum1 = true;
520 base->arg = arg;
521
522 arg->index.heapRel = heapRel;
523 arg->index.indexRel = indexRel;
524 arg->enforceUnique = false;
525 arg->uniqueNullsNotDistinct = false;
526
527 /* Prepare SortSupport data for each column */
528 base->sortKeys = (SortSupport) palloc0(base->nKeys *
529 sizeof(SortSupportData));
530
531 for (i = 0; i < base->nKeys; i++)
532 {
533 SortSupport sortKey = base->sortKeys + i;
534
536 sortKey->ssup_collation = indexRel->rd_indcollation[i];
537 sortKey->ssup_nulls_first = false;
538 sortKey->ssup_attno = i + 1;
539 /* Convey if abbreviation optimization is applicable in principle */
540 sortKey->abbreviate = (i == 0 && base->haveDatum1);
541
542 Assert(sortKey->ssup_attno != 0);
543
544 /* Look for a sort support function */
545 PrepareSortSupportFromGistIndexRel(indexRel, sortKey);
546 }
547
548 MemoryContextSwitchTo(oldcontext);
549
550 return state;
551}
552
555 SortCoordinate coordinate,
556 int sortopt)
557{
558 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
559 sortopt);
561
562 if (trace_sort)
563 elog(LOG,
564 "begin index sort: workMem = %d, randomAccess = %c",
565 workMem,
566 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
567
568 base->nKeys = 1; /* Only one sort column, the block number */
569
574 base->haveDatum1 = true;
575 base->arg = NULL;
576
577 return state;
578}
579
582 Relation indexRel,
583 int workMem, SortCoordinate coordinate,
584 int sortopt)
585{
586 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
587 sortopt);
589 MemoryContext oldcontext;
590 int i;
591 TupleDesc desc = RelationGetDescr(indexRel);
592
593 oldcontext = MemoryContextSwitchTo(base->maincontext);
594
595#ifdef TRACE_SORT
596 if (trace_sort)
597 elog(LOG,
598 "begin index sort: workMem = %d, randomAccess = %c",
599 workMem,
600 sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
601#endif
602
603 /*
604 * Multi-column GIN indexes expand the row into a separate index entry for
605 * each attribute, and that's what we write into the tuplesort. But we still
606 * need to initialize sortsupport for all the attributes.
607 */
609
610 /* Prepare SortSupport data for each column */
611 base->sortKeys = (SortSupport) palloc0(base->nKeys *
612 sizeof(SortSupportData));
613
614 for (i = 0; i < base->nKeys; i++)
615 {
616 SortSupport sortKey = base->sortKeys + i;
617 Form_pg_attribute att = TupleDescAttr(desc, i);
618 TypeCacheEntry *typentry;
619
621 sortKey->ssup_collation = indexRel->rd_indcollation[i];
622 sortKey->ssup_nulls_first = false;
623 sortKey->ssup_attno = i + 1;
624 sortKey->abbreviate = false;
625
626 Assert(sortKey->ssup_attno != 0);
627
628 if (!OidIsValid(sortKey->ssup_collation))
629 sortKey->ssup_collation = DEFAULT_COLLATION_OID;
630
631 /*
632 * Look up an ordering operator for the index key data type, and then the
633 * sort support function.
634 */
635 typentry = lookup_type_cache(att->atttypid, TYPECACHE_LT_OPR);
636 PrepareSortSupportFromOrderingOp(typentry->lt_opr, sortKey);
637 }
638
643 base->haveDatum1 = false;
644 base->arg = NULL;
645
646 MemoryContextSwitchTo(oldcontext);
647
648 return state;
649}
650
652tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
653 bool nullsFirstFlag, int workMem,
654 SortCoordinate coordinate, int sortopt)
655{
656 Tuplesortstate *state = tuplesort_begin_common(workMem, coordinate,
657 sortopt);
660 MemoryContext oldcontext;
661 int16 typlen;
662 bool typbyval;
663
664 oldcontext = MemoryContextSwitchTo(base->maincontext);
666
667 if (trace_sort)
668 elog(LOG,
669 "begin datum sort: workMem = %d, randomAccess = %c",
670 workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
671
672 base->nKeys = 1; /* always a one-column sort */
673
674 TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
675 false, /* no unique check */
676 1,
677 workMem,
678 sortopt & TUPLESORT_RANDOMACCESS,
679 PARALLEL_SORT(coordinate));
680
684 base->writetup = writetup_datum;
685 base->readtup = readtup_datum;
686 base->haveDatum1 = true;
687 base->arg = arg;
688
689 arg->datumType = datumType;
690
691 /* lookup necessary attributes of the datum type */
692 get_typlenbyval(datumType, &typlen, &typbyval);
693 arg->datumTypeLen = typlen;
694 base->tuples = !typbyval;
695
696 /* Prepare SortSupport data */
697 base->sortKeys = (SortSupport) palloc0(sizeof(SortSupportData));
698
700 base->sortKeys->ssup_collation = sortCollation;
701 base->sortKeys->ssup_nulls_first = nullsFirstFlag;
702
703 /*
704 * Abbreviation is possible here only for by-reference types. In theory,
705 * a pass-by-value datatype could have an abbreviated form that is cheaper
706 * to compare. In a tuple sort, we could support that, because we can
707 * always extract the original datum from the tuple as needed. Here, we
708 * can't, because a datum sort only stores a single copy of the datum; the
709 * "tuple" field of each SortTuple is NULL.
710 */
711 base->sortKeys->abbreviate = !typbyval;
712
713 PrepareSortSupportFromOrderingOp(sortOperator, base->sortKeys);
714
715 /*
716 * The "onlyKey" optimization cannot be used with abbreviated keys, since
717 * tie-breaker comparisons may be required. Typically, the optimization
718 * is only of value to pass-by-value types anyway, whereas abbreviated
719 * keys are typically only of value to pass-by-reference types.
720 */
721 if (!base->sortKeys->abbrev_converter)
722 base->onlyKey = base->sortKeys;
723
724 MemoryContextSwitchTo(oldcontext);
725
726 return state;
727}
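
/*
 * Illustrative usage sketch (not compiled): sorting bare Datums.  The caller
 * supplies the datum type, its "<" operator, and a collation; those
 * parameters and the value arrays are assumptions made for the example.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLES
static void
example_datum_sort(Oid datumType, Oid ltOperator, Oid collation,
				   Datum *values, bool *nulls, int nvalues, int workMem)
{
	Tuplesortstate *sortstate;
	Datum		value;
	bool		isnull;

	sortstate = tuplesort_begin_datum(datumType, ltOperator, collation,
									  false /* nulls last */ ,
									  workMem, NULL, TUPLESORT_NONE);

	/* pass-by-reference inputs are copied; the caller keeps its own copies */
	for (int i = 0; i < nvalues; i++)
		tuplesort_putdatum(sortstate, values[i], nulls[i]);

	tuplesort_performsort(sortstate);

	/* copy=true: by-ref results are palloc'd in the caller's context */
	while (tuplesort_getdatum(sortstate, true, true, &value, &isnull, NULL))
	{
		/* ... consume value ... */
	}

	tuplesort_end(sortstate);
}
#endif							/* TUPLESORT_VARIANTS_EXAMPLES */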
728
729/*
730 * Accept one tuple while collecting input data for sort.
731 *
732 * Note that the input data is always copied; the caller need not save it.
733 */
734void
736{
739 TupleDesc tupDesc = (TupleDesc) base->arg;
740 SortTuple stup;
741 MinimalTuple tuple;
742 HeapTupleData htup;
743 Size tuplen;
744
745 /* copy the tuple into sort storage */
746 tuple = ExecCopySlotMinimalTuple(slot);
747 stup.tuple = tuple;
748 /* set up first-column key value */
749 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
750 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
751 stup.datum1 = heap_getattr(&htup,
752 base->sortKeys[0].ssup_attno,
753 tupDesc,
754 &stup.isnull1);
755
756 /* GetMemoryChunkSpace is not supported for bump contexts */
758 tuplen = MAXALIGN(tuple->t_len);
759 else
760 tuplen = GetMemoryChunkSpace(tuple);
761
763 base->sortKeys->abbrev_converter &&
764 !stup.isnull1, tuplen);
765
766 MemoryContextSwitchTo(oldcontext);
767}
768
769/*
770 * Accept one tuple while collecting input data for sort.
771 *
772 * Note that the input data is always copied; the caller need not save it.
773 */
774void
776{
777 SortTuple stup;
781 Size tuplen;
782
783 /* copy the tuple into sort storage */
784 tup = heap_copytuple(tup);
785 stup.tuple = tup;
786
787 /*
788 * set up first-column key value, and potentially abbreviate, if it's a
789 * simple column
790 */
791 if (base->haveDatum1)
792 {
793 stup.datum1 = heap_getattr(tup,
794 arg->indexInfo->ii_IndexAttrNumbers[0],
795 arg->tupDesc,
796 &stup.isnull1);
797 }
798
799 /* GetMemoryChunkSpace is not supported for bump contexts */
801 tuplen = MAXALIGN(HEAPTUPLESIZE + tup->t_len);
802 else
803 tuplen = GetMemoryChunkSpace(tup);
804
806 base->haveDatum1 &&
807 base->sortKeys->abbrev_converter &&
808 !stup.isnull1, tuplen);
809
810 MemoryContextSwitchTo(oldcontext);
811}
812
813/*
814 * Collect one index tuple while collecting input data for sort, building
815 * it from caller-supplied values.
816 */
817void
819 ItemPointer self, const Datum *values,
820 const bool *isnull)
821{
822 SortTuple stup;
823 IndexTuple tuple;
826 Size tuplen;
827
829 isnull, base->tuplecontext);
830 tuple = ((IndexTuple) stup.tuple);
831 tuple->t_tid = *self;
832 /* set up first-column key value */
833 stup.datum1 = index_getattr(tuple,
834 1,
835 RelationGetDescr(arg->indexRel),
836 &stup.isnull1);
837
838 /* GetMemoryChunkSpace is not supported for bump contexts */
840 tuplen = MAXALIGN(tuple->t_info & INDEX_SIZE_MASK);
841 else
842 tuplen = GetMemoryChunkSpace(tuple);
843
845 base->sortKeys &&
846 base->sortKeys->abbrev_converter &&
847 !stup.isnull1, tuplen);
848}
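
/*
 * Illustrative usage sketch (not compiled): collecting index entries for a
 * btree build, roughly in the style of the real caller in nbtsort.c.  The
 * per-row values/isnull arrays and the TID source are assumptions made for
 * the example.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLES
static void
example_btree_index_sort(Relation heapRel, Relation indexRel,
						 ItemPointer tids, Datum **values, bool **isnull,
						 int ntuples, int workMem)
{
	Tuplesortstate *sortstate;
	IndexTuple	itup;

	sortstate = tuplesort_begin_index_btree(heapRel, indexRel,
											false,	/* enforceUnique */
											false,	/* uniqueNullsNotDistinct */
											workMem, NULL, TUPLESORT_NONE);

	for (int i = 0; i < ntuples; i++)
		tuplesort_putindextuplevalues(sortstate, indexRel, &tids[i],
									  values[i], isnull[i]);

	tuplesort_performsort(sortstate);

	/* returned IndexTuples belong to the tuplesort; do not pfree them */
	while ((itup = tuplesort_getindextuple(sortstate, true)) != NULL)
	{
		/* ... add itup to the new index ... */
	}

	tuplesort_end(sortstate);
}
#endif							/* TUPLESORT_VARIANTS_EXAMPLES */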
849
850/*
851 * Collect one BRIN tuple while collecting input data for sort.
852 */
853void
855{
856 SortTuple stup;
857 BrinSortTuple *bstup;
860 Size tuplen;
861
862 /* allocate space for the whole BRIN sort tuple */
863 bstup = palloc(BRINSORTTUPLE_SIZE(size));
864
865 bstup->tuplen = size;
866 memcpy(&bstup->tuple, tuple, size);
867
868 stup.tuple = bstup;
869 stup.datum1 = UInt32GetDatum(tuple->bt_blkno);
870 stup.isnull1 = false;
871
872 /* GetMemoryChunkSpace is not supported for bump contexts */
874 tuplen = MAXALIGN(BRINSORTTUPLE_SIZE(size));
875 else
876 tuplen = GetMemoryChunkSpace(bstup);
877
879 base->sortKeys &&
880 base->sortKeys->abbrev_converter &&
881 !stup.isnull1, tuplen);
882
883 MemoryContextSwitchTo(oldcontext);
884}
885
886void
888{
889 SortTuple stup;
890 GinTuple *ctup;
893 Size tuplen;
894
895 /* copy the GinTuple into the right memory context */
896 ctup = palloc(size);
897 memcpy(ctup, tuple, size);
898
899 stup.tuple = ctup;
900 stup.datum1 = (Datum) 0;
901 stup.isnull1 = false;
902
903 /* GetMemoryChunkSpace is not supported for bump contexts */
905 tuplen = MAXALIGN(size);
906 else
907 tuplen = GetMemoryChunkSpace(ctup);
908
910 base->sortKeys &&
911 base->sortKeys->abbrev_converter &&
912 !stup.isnull1, tuplen);
913
914 MemoryContextSwitchTo(oldcontext);
915}
916
917/*
918 * Accept one Datum while collecting input data for sort.
919 *
920 * If the Datum is of a pass-by-reference type, the value will be copied.
921 */
922void
924{
928 SortTuple stup;
929
930 /*
931 * Pass-by-value types or null values are just stored directly in
932 * stup.datum1 (and stup.tuple is not used and set to NULL).
933 *
934 * Non-null pass-by-reference values need to be copied into memory we
935 * control, and possibly abbreviated. The copied value is pointed to by
936 * stup.tuple and is treated as the canonical copy (e.g. to return via
937 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
938 * abbreviated value if abbreviation is happening, otherwise it's
939 * identical to stup.tuple.
940 */
941
942 if (isNull || !base->tuples)
943 {
944 /*
945 * Set datum1 to zeroed representation for NULLs (to be consistent,
946 * and to support cheap inequality tests for NULL abbreviated keys).
947 */
948 stup.datum1 = !isNull ? val : (Datum) 0;
949 stup.isnull1 = isNull;
950 stup.tuple = NULL; /* no separate storage */
951 }
952 else
953 {
954 stup.isnull1 = false;
955 stup.datum1 = datumCopy(val, false, arg->datumTypeLen);
956 stup.tuple = DatumGetPointer(stup.datum1);
957 }
958
960 base->tuples &&
961 base->sortKeys->abbrev_converter && !isNull, 0);
962
963 MemoryContextSwitchTo(oldcontext);
964}
965
966/*
967 * Fetch the next tuple in either forward or back direction.
968 * If successful, put tuple in slot and return true; else, clear the slot
969 * and return false.
970 *
971 * Caller may optionally be passed back abbreviated value (on true return
972 * value) when abbreviation was used, which can be used to cheaply avoid
973 * equality checks that might otherwise be required. Caller can safely make a
974 * determination of "non-equal tuple" based on simple binary inequality. A
975 * NULL value in leading attribute will set abbreviated value to zeroed
976 * representation, which caller may rely on in abbreviated inequality check.
977 *
978 * If copy is true, the slot receives a tuple that's been copied into the
979 * caller's memory context, so that it will stay valid regardless of future
980 * manipulations of the tuplesort's state (up to and including deleting the
981 * tuplesort). If copy is false, the slot will just receive a pointer to a
982 * tuple held within the tuplesort, which is more efficient, but only safe for
983 * callers that are prepared to have any subsequent manipulation of the
984 * tuplesort's state invalidate slot contents.
985 */
986bool
988 TupleTableSlot *slot, Datum *abbrev)
989{
992 SortTuple stup;
993
994 if (!tuplesort_gettuple_common(state, forward, &stup))
995 stup.tuple = NULL;
996
997 MemoryContextSwitchTo(oldcontext);
998
999 if (stup.tuple)
1000 {
1001 /* Record abbreviated key for caller */
1002 if (base->sortKeys->abbrev_converter && abbrev)
1003 *abbrev = stup.datum1;
1004
1005 if (copy)
1007
1008 ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, copy);
1009 return true;
1010 }
1011 else
1012 {
1013 ExecClearTuple(slot);
1014 return false;
1015 }
1016}
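
/*
 * Illustrative sketch (not compiled) of the abbreviated-key usage described
 * above: a caller detecting group boundaries can compare the returned
 * abbreviated values first and fall back to an authoritative comparison only
 * when they are equal.  This assumes the leading key actually uses
 * abbreviation (otherwise pass NULL for the abbrev argument); the
 * full_compare() callback, which reports whether the slot's key equals the
 * previous one, is a placeholder invented for the example.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLES
static int
example_count_distinct_leading_keys(Tuplesortstate *sortstate,
									TupleTableSlot *slot,
									bool (*full_compare) (TupleTableSlot *slot))
{
	Datum		abbrev = (Datum) 0;
	Datum		prev_abbrev = (Datum) 0;
	bool		first = true;
	int			ngroups = 0;

	while (tuplesort_gettupleslot(sortstate, true, false, slot, &abbrev))
	{
		/*
		 * Unequal abbreviated values prove the full keys differ; equal
		 * abbreviations (including the zeroed NULL representation) still
		 * require a full comparison.
		 */
		if (first || abbrev != prev_abbrev || !full_compare(slot))
			ngroups++;

		prev_abbrev = abbrev;
		first = false;
	}

	return ngroups;
}
#endif							/* TUPLESORT_VARIANTS_EXAMPLES */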
1017
1018/*
1019 * Fetch the next tuple in either forward or back direction.
1020 * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
1021 * context, and must not be freed by caller. Caller may not rely on tuple
1022 * remaining valid after any further manipulation of tuplesort.
1023 */
1026{
1029 SortTuple stup;
1030
1031 if (!tuplesort_gettuple_common(state, forward, &stup))
1032 stup.tuple = NULL;
1033
1034 MemoryContextSwitchTo(oldcontext);
1035
1036 return stup.tuple;
1037}
1038
1039/*
1040 * Fetch the next index tuple in either forward or back direction.
1041 * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
1042 * context, and must not be freed by caller. Caller may not rely on tuple
1043 * remaining valid after any further manipulation of tuplesort.
1044 */
1047{
1050 SortTuple stup;
1051
1052 if (!tuplesort_gettuple_common(state, forward, &stup))
1053 stup.tuple = NULL;
1054
1055 MemoryContextSwitchTo(oldcontext);
1056
1057 return (IndexTuple) stup.tuple;
1058}
1059
1060/*
1061 * Fetch the next BRIN tuple in either forward or back direction.
1062 * Returns NULL if no more tuples. Returned tuple belongs to tuplesort memory
1063 * context, and must not be freed by caller. Caller may not rely on tuple
1064 * remaining valid after any further manipulation of tuplesort.
1065 */
1066BrinTuple *
1068{
1071 SortTuple stup;
1072 BrinSortTuple *btup;
1073
1074 if (!tuplesort_gettuple_common(state, forward, &stup))
1075 stup.tuple = NULL;
1076
1077 MemoryContextSwitchTo(oldcontext);
1078
1079 if (!stup.tuple)
1080 return NULL;
1081
1082 btup = (BrinSortTuple *) stup.tuple;
1083
1084 *len = btup->tuplen;
1085
1086 return &btup->tuple;
1087}
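
/*
 * Illustrative usage sketch (not compiled): spooling BRIN range summaries
 * through a sort so they come back ordered by block number, roughly as the
 * parallel BRIN build does.  The tuple source and the consumer are
 * assumptions made for the example.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLES
static void
example_brin_sort(BrinTuple **tuples, Size *sizes, int ntuples, int workMem)
{
	Tuplesortstate *sortstate;
	BrinTuple  *btup;
	Size		len;

	sortstate = tuplesort_begin_index_brin(workMem, NULL, TUPLESORT_NONE);

	for (int i = 0; i < ntuples; i++)
		tuplesort_putbrintuple(sortstate, tuples[i], sizes[i]);

	tuplesort_performsort(sortstate);

	/* tuples come back ordered by bt_blkno; memory belongs to the sort */
	while ((btup = tuplesort_getbrintuple(sortstate, &len, true)) != NULL)
	{
		/* ... merge or insert the range summary of length "len" ... */
	}

	tuplesort_end(sortstate);
}
#endif							/* TUPLESORT_VARIANTS_EXAMPLES */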
1088
1089GinTuple *
1091{
1094 SortTuple stup;
1095 GinTuple *tup;
1096
1097 if (!tuplesort_gettuple_common(state, forward, &stup))
1098 stup.tuple = NULL;
1099
1100 MemoryContextSwitchTo(oldcontext);
1101
1102 if (!stup.tuple)
1103 return NULL;
1104
1105 tup = (GinTuple *) stup.tuple;
1106
1107 *len = tup->tuplen;
1108
1109 return tup;
1110}
1111
1112/*
1113 * Fetch the next Datum in either forward or back direction.
1114 * Returns false if no more datums.
1115 *
1116 * If the Datum is of a pass-by-reference type, the returned value is freshly
1117 * palloc'd in the caller's context, and is now owned by the caller (this
1118 * differs from similar routines for other types of tuplesorts).
1119 *
1120 * Caller may optionally be passed back abbreviated value (on true return
1121 * value) when abbreviation was used, which can be used to cheaply avoid
1122 * equality checks that might otherwise be required. Caller can safely make a
1123 * determination of "non-equal tuple" based on simple binary inequality. A
1124 * NULL value will have a zeroed abbreviated value representation, which caller
1125 * may rely on in abbreviated inequality check.
1126 *
1127 * For byref Datums, if copy is true, *val is set to a copy of the Datum
1128 * copied into the caller's memory context, so that it will stay valid
1129 * regardless of future manipulations of the tuplesort's state (up to and
1130 * including deleting the tuplesort). If copy is false, *val will just be
1131 * set to a pointer to the Datum held within the tuplesort, which is more
1132 * efficient, but only safe for callers that are prepared to have any
1133 * subsequent manipulation of the tuplesort's state invalidate slot contents.
1134 * For byval Datums, the value of the 'copy' parameter has no effect.
1135 *
1136 */
1137bool
1138tuplesort_getdatum(Tuplesortstate *state, bool forward, bool copy,
1139 Datum *val, bool *isNull, Datum *abbrev)
1140{
1144 SortTuple stup;
1145
1146 if (!tuplesort_gettuple_common(state, forward, &stup))
1147 {
1148 MemoryContextSwitchTo(oldcontext);
1149 return false;
1150 }
1151
1152 /* Ensure we copy into caller's memory context */
1153 MemoryContextSwitchTo(oldcontext);
1154
1155 /* Record abbreviated key for caller */
1156 if (base->sortKeys->abbrev_converter && abbrev)
1157 *abbrev = stup.datum1;
1158
1159 if (stup.isnull1 || !base->tuples)
1160 {
1161 *val = stup.datum1;
1162 *isNull = stup.isnull1;
1163 }
1164 else
1165 {
1166 /* use stup.tuple because stup.datum1 may be an abbreviation */
1167 if (copy)
1168 *val = datumCopy(PointerGetDatum(stup.tuple), false,
1169 arg->datumTypeLen);
1170 else
1171 *val = PointerGetDatum(stup.tuple);
1172 *isNull = false;
1173 }
1174
1175 return true;
1176}
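
/*
 * Illustrative sketch (not compiled) of the 'copy' semantics documented
 * above, assuming a pass-by-reference datum type: with copy=false the
 * pointer is only valid until the tuplesort is manipulated again, while
 * copy=true hands the caller a palloc'd copy it owns and must eventually
 * free itself.
 */
#ifdef TUPLESORT_VARIANTS_EXAMPLES
static void
example_getdatum_copy_semantics(Tuplesortstate *sortstate)
{
	Datum		val;
	bool		isnull;

	/* Borrowed pointer: cheap, but do not use it after the next fetch. */
	if (tuplesort_getdatum(sortstate, true, false, &val, &isnull, NULL))
	{
		/* ... inspect val immediately ... */
	}

	/* Owned copy: survives further tuplesort calls and tuplesort_end(). */
	if (tuplesort_getdatum(sortstate, true, true, &val, &isnull, NULL))
	{
		/* ... keep val as long as needed ... */
		if (!isnull)
			pfree(DatumGetPointer(val));
	}
}
#endif							/* TUPLESORT_VARIANTS_EXAMPLES */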
1177
1178
1179/*
1180 * Routines specialized for HeapTuple (actually MinimalTuple) case
1181 */
1182
1183static void
1185{
1186 int i;
1188
1189 for (i = 0; i < count; i++)
1190 {
1191 HeapTupleData htup;
1192
1193 htup.t_len = ((MinimalTuple) stups[i].tuple)->t_len +
1195 htup.t_data = (HeapTupleHeader) ((char *) stups[i].tuple -
1197 stups[i].datum1 = heap_getattr(&htup,
1198 base->sortKeys[0].ssup_attno,
1199 (TupleDesc) base->arg,
1200 &stups[i].isnull1);
1201 }
1202}
1203
1204static int
1206{
1208 SortSupport sortKey = base->sortKeys;
1209 int32 compare;
1210
1211
1212 /* Compare the leading sort key */
1213 compare = ApplySortComparator(a->datum1, a->isnull1,
1214 b->datum1, b->isnull1,
1215 sortKey);
1216 if (compare != 0)
1217 return compare;
1218
1219 /* Compare additional sort keys */
1221}
1222
1223static int
1225{
1227 SortSupport sortKey = base->sortKeys;
1228 HeapTupleData ltup;
1229 HeapTupleData rtup;
1230 TupleDesc tupDesc;
1231 int nkey;
1232 int32 compare;
1233 AttrNumber attno;
1234 Datum datum1,
1235 datum2;
1236 bool isnull1,
1237 isnull2;
1238
1239 ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
1240 ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET);
1241 rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET;
1242 rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET);
1243 tupDesc = (TupleDesc) base->arg;
1244
1245 if (sortKey->abbrev_converter)
1246 {
1247 attno = sortKey->ssup_attno;
1248
1249 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
1250 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
1251
1252 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1253 datum2, isnull2,
1254 sortKey);
1255 if (compare != 0)
1256 return compare;
1257 }
1258
1259 sortKey++;
1260 for (nkey = 1; nkey < base->nKeys; nkey++, sortKey++)
1261 {
1262 attno = sortKey->ssup_attno;
1263
1264 datum1 = heap_getattr(&ltup, attno, tupDesc, &isnull1);
1265 datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2);
1266
1267 compare = ApplySortComparator(datum1, isnull1,
1268 datum2, isnull2,
1269 sortKey);
1270 if (compare != 0)
1271 return compare;
1272 }
1273
1274 return 0;
1275}
1276
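
/*
 * On-tape record layout produced by writetup_heap() and consumed by
 * readtup_heap() below (the other writetup/readtup pairs in this file follow
 * the same scheme, with different payloads):
 *
 *		unsigned int tuplen;	total length, counting this word
 *		<payload bytes>			here: the MinimalTuple minus the part of its
 *								header that need not be stored
 *		unsigned int tuplen;	trailing copy, written only under
 *								TUPLESORT_RANDOMACCESS so the tape can also be
 *								read backwards
 */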
1277static void
1279{
1281 MinimalTuple tuple = (MinimalTuple) stup->tuple;
1282
1283 /* the part of the MinimalTuple we'll write: */
1284 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
1285 unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET;
1286
1287 /* total on-disk footprint: */
1288 unsigned int tuplen = tupbodylen + sizeof(int);
1289
1290 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1291 LogicalTapeWrite(tape, tupbody, tupbodylen);
1292 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1293 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1294}
1295
1296static void
1298 LogicalTape *tape, unsigned int len)
1299{
1300 unsigned int tupbodylen = len - sizeof(int);
1301 unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
1303 char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
1305 HeapTupleData htup;
1306
1307 /* read in the tuple proper */
1308 tuple->t_len = tuplen;
1309 LogicalTapeReadExact(tape, tupbody, tupbodylen);
1310 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1311 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1312 stup->tuple = tuple;
1313 /* set up first-column key value */
1314 htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
1315 htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
1316 stup->datum1 = heap_getattr(&htup,
1317 base->sortKeys[0].ssup_attno,
1318 (TupleDesc) base->arg,
1319 &stup->isnull1);
1320}
1321
1322/*
1323 * Routines specialized for the CLUSTER case (HeapTuple data, with
1324 * comparisons per a btree index definition)
1325 */
1326
1327static void
1329{
1330 int i;
1333
1334 for (i = 0; i < count; i++)
1335 {
1336 HeapTuple tup;
1337
1338 tup = (HeapTuple) stups[i].tuple;
1339 stups[i].datum1 = heap_getattr(tup,
1340 arg->indexInfo->ii_IndexAttrNumbers[0],
1341 arg->tupDesc,
1342 &stups[i].isnull1);
1343 }
1344}
1345
1346static int
1349{
1351 SortSupport sortKey = base->sortKeys;
1352 int32 compare;
1353
1354 /* Compare the leading sort key, if it's simple */
1355 if (base->haveDatum1)
1356 {
1357 compare = ApplySortComparator(a->datum1, a->isnull1,
1358 b->datum1, b->isnull1,
1359 sortKey);
1360 if (compare != 0)
1361 return compare;
1362 }
1363
1365}
1366
1367static int
1370{
1373 SortSupport sortKey = base->sortKeys;
1374 HeapTuple ltup;
1375 HeapTuple rtup;
1376 TupleDesc tupDesc;
1377 int nkey;
1378 int32 compare = 0;
1379 Datum datum1,
1380 datum2;
1381 bool isnull1,
1382 isnull2;
1383
1384 ltup = (HeapTuple) a->tuple;
1385 rtup = (HeapTuple) b->tuple;
1386 tupDesc = arg->tupDesc;
1387
1388 /* Compare the leading sort key, if it's simple */
1389 if (base->haveDatum1)
1390 {
1391 if (sortKey->abbrev_converter)
1392 {
1393 AttrNumber leading = arg->indexInfo->ii_IndexAttrNumbers[0];
1394
1395 datum1 = heap_getattr(ltup, leading, tupDesc, &isnull1);
1396 datum2 = heap_getattr(rtup, leading, tupDesc, &isnull2);
1397
1398 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1399 datum2, isnull2,
1400 sortKey);
1401 }
1402 if (compare != 0 || base->nKeys == 1)
1403 return compare;
1404 /* Compare additional columns the hard way */
1405 sortKey++;
1406 nkey = 1;
1407 }
1408 else
1409 {
1410 /* Must compare all keys the hard way */
1411 nkey = 0;
1412 }
1413
1414 if (arg->indexInfo->ii_Expressions == NULL)
1415 {
1416 /* If not expression index, just compare the proper heap attrs */
1417
1418 for (; nkey < base->nKeys; nkey++, sortKey++)
1419 {
1420 AttrNumber attno = arg->indexInfo->ii_IndexAttrNumbers[nkey];
1421
1422 datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1);
1423 datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2);
1424
1425 compare = ApplySortComparator(datum1, isnull1,
1426 datum2, isnull2,
1427 sortKey);
1428 if (compare != 0)
1429 return compare;
1430 }
1431 }
1432 else
1433 {
1434 /*
1435 * In the expression index case, compute the whole index tuple and
1436 * then compare values. It would perhaps be faster to compute only as
1437 * many columns as we need to compare, but that would require
1438 * duplicating all the logic in FormIndexDatum.
1439 */
1440 Datum l_index_values[INDEX_MAX_KEYS];
1441 bool l_index_isnull[INDEX_MAX_KEYS];
1442 Datum r_index_values[INDEX_MAX_KEYS];
1443 bool r_index_isnull[INDEX_MAX_KEYS];
1444 TupleTableSlot *ecxt_scantuple;
1445
1446 /* Reset context each time to prevent memory leakage */
1448
1449 ecxt_scantuple = GetPerTupleExprContext(arg->estate)->ecxt_scantuple;
1450
1451 ExecStoreHeapTuple(ltup, ecxt_scantuple, false);
1452 FormIndexDatum(arg->indexInfo, ecxt_scantuple, arg->estate,
1453 l_index_values, l_index_isnull);
1454
1455 ExecStoreHeapTuple(rtup, ecxt_scantuple, false);
1456 FormIndexDatum(arg->indexInfo, ecxt_scantuple, arg->estate,
1457 r_index_values, r_index_isnull);
1458
1459 for (; nkey < base->nKeys; nkey++, sortKey++)
1460 {
1461 compare = ApplySortComparator(l_index_values[nkey],
1462 l_index_isnull[nkey],
1463 r_index_values[nkey],
1464 r_index_isnull[nkey],
1465 sortKey);
1466 if (compare != 0)
1467 return compare;
1468 }
1469 }
1470
1471 return 0;
1472}
1473
1474static void
1476{
1478 HeapTuple tuple = (HeapTuple) stup->tuple;
1479 unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int);
1480
1481 /* We need to store t_self, but not other fields of HeapTupleData */
1482 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1483 LogicalTapeWrite(tape, &tuple->t_self, sizeof(ItemPointerData));
1484 LogicalTapeWrite(tape, tuple->t_data, tuple->t_len);
1485 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1486 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1487}
1488
1489static void
1491 LogicalTape *tape, unsigned int tuplen)
1492{
1495 unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
1497 t_len + HEAPTUPLESIZE);
1498
1499 /* Reconstruct the HeapTupleData header */
1500 tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
1501 tuple->t_len = t_len;
1502 LogicalTapeReadExact(tape, &tuple->t_self, sizeof(ItemPointerData));
1503 /* We don't currently bother to reconstruct t_tableOid */
1504 tuple->t_tableOid = InvalidOid;
1505 /* Read in the tuple body */
1506 LogicalTapeReadExact(tape, tuple->t_data, tuple->t_len);
1507 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1508 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1509 stup->tuple = tuple;
1510 /* set up first-column key value, if it's a simple column */
1511 if (base->haveDatum1)
1512 stup->datum1 = heap_getattr(tuple,
1513 arg->indexInfo->ii_IndexAttrNumbers[0],
1514 arg->tupDesc,
1515 &stup->isnull1);
1516}
1517
1518static void
1520{
1523
1524 /* Free any execution state created for CLUSTER case */
1525 if (arg->estate != NULL)
1526 {
1527 ExprContext *econtext = GetPerTupleExprContext(arg->estate);
1528
1530 FreeExecutorState(arg->estate);
1531 }
1532}
1533
1534/*
1535 * Routines specialized for IndexTuple case
1536 *
1537 * The btree and hash cases require separate comparison functions, but the
1538 * IndexTuple representation is the same so the copy/write/read support
1539 * functions can be shared.
1540 */
1541
1542static void
1544{
1547 int i;
1548
1549 for (i = 0; i < count; i++)
1550 {
1551 IndexTuple tuple;
1552
1553 tuple = stups[i].tuple;
1554 stups[i].datum1 = index_getattr(tuple,
1555 1,
1556 RelationGetDescr(arg->indexRel),
1557 &stups[i].isnull1);
1558 }
1559}
1560
1561static int
1564{
1565 /*
1566 * This is similar to comparetup_heap(), but expects index tuples. There
1567 * is also special handling for enforcing uniqueness, and special
1568 * treatment for equal keys at the end.
1569 */
1571 SortSupport sortKey = base->sortKeys;
1572 int32 compare;
1573
1574 /* Compare the leading sort key */
1575 compare = ApplySortComparator(a->datum1, a->isnull1,
1576 b->datum1, b->isnull1,
1577 sortKey);
1578 if (compare != 0)
1579 return compare;
1580
1581 /* Compare additional sort keys */
1583}
1584
1585static int
1588{
1591 SortSupport sortKey = base->sortKeys;
1592 IndexTuple tuple1;
1593 IndexTuple tuple2;
1594 int keysz;
1595 TupleDesc tupDes;
1596 bool equal_hasnull = false;
1597 int nkey;
1598 int32 compare;
1599 Datum datum1,
1600 datum2;
1601 bool isnull1,
1602 isnull2;
1603
1604 tuple1 = (IndexTuple) a->tuple;
1605 tuple2 = (IndexTuple) b->tuple;
1606 keysz = base->nKeys;
1607 tupDes = RelationGetDescr(arg->index.indexRel);
1608
1609 if (sortKey->abbrev_converter)
1610 {
1611 datum1 = index_getattr(tuple1, 1, tupDes, &isnull1);
1612 datum2 = index_getattr(tuple2, 1, tupDes, &isnull2);
1613
1614 compare = ApplySortAbbrevFullComparator(datum1, isnull1,
1615 datum2, isnull2,
1616 sortKey);
1617 if (compare != 0)
1618 return compare;
1619 }
1620
1621 /* they are equal, so we only need to examine one null flag */
1622 if (a->isnull1)
1623 equal_hasnull = true;
1624
1625 sortKey++;
1626 for (nkey = 2; nkey <= keysz; nkey++, sortKey++)
1627 {
1628 datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1);
1629 datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2);
1630
1631 compare = ApplySortComparator(datum1, isnull1,
1632 datum2, isnull2,
1633 sortKey);
1634 if (compare != 0)
1635 return compare; /* done when we find unequal attributes */
1636
1637 /* they are equal, so we only need to examine one null flag */
1638 if (isnull1)
1639 equal_hasnull = true;
1640 }
1641
1642 /*
1643 * If btree has asked us to enforce uniqueness, complain if two equal
1644 * tuples are detected (unless there was at least one NULL field and NULLS
1645 * NOT DISTINCT was not set).
1646 *
1647 * It is sufficient to make the test here, because if two tuples are equal
1648 * they *must* get compared at some stage of the sort --- otherwise the
1649 * sort algorithm wouldn't have checked whether one must appear before the
1650 * other.
1651 */
1652 if (arg->enforceUnique && !(!arg->uniqueNullsNotDistinct && equal_hasnull))
1653 {
1655 bool isnull[INDEX_MAX_KEYS];
1656 char *key_desc;
1657
1658 /*
1659 * Some rather brain-dead implementations of qsort (such as the one in
1660 * QNX 4) will sometimes call the comparison routine to compare a
1661 * value to itself, but we always use our own implementation, which
1662 * does not.
1663 */
1664 Assert(tuple1 != tuple2);
1665
1666 index_deform_tuple(tuple1, tupDes, values, isnull);
1667
1668 key_desc = BuildIndexValueDescription(arg->index.indexRel, values, isnull);
1669
1670 ereport(ERROR,
1671 (errcode(ERRCODE_UNIQUE_VIOLATION),
1672 errmsg("could not create unique index \"%s\"",
1673 RelationGetRelationName(arg->index.indexRel)),
1674 key_desc ? errdetail("Key %s is duplicated.", key_desc) :
1675 errdetail("Duplicate keys exist."),
1676 errtableconstraint(arg->index.heapRel,
1677 RelationGetRelationName(arg->index.indexRel))));
1678 }
1679
1680 /*
1681 * If key values are equal, we sort on ItemPointer. This is required for
1682 * btree indexes, since heap TID is treated as an implicit last key
1683 * attribute in order to ensure that all keys in the index are physically
1684 * unique.
1685 */
1686 {
1687 BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
1688 BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);
1689
1690 if (blk1 != blk2)
1691 return (blk1 < blk2) ? -1 : 1;
1692 }
1693 {
1694 OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
1695 OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);
1696
1697 if (pos1 != pos2)
1698 return (pos1 < pos2) ? -1 : 1;
1699 }
1700
1701 /* ItemPointer values should never be equal */
1702 Assert(false);
1703
1704 return 0;
1705}
1706
1707static int
1710{
1711 Bucket bucket1;
1712 Bucket bucket2;
1713 uint32 hash1;
1714 uint32 hash2;
1715 IndexTuple tuple1;
1716 IndexTuple tuple2;
1719
1720 /*
1721 * Fetch hash keys and mask off bits we don't want to sort by, so that the
1722 * initial sort is just on the bucket number. We know that the first
1723 * column of the index tuple is the hash key.
1724 */
1725 Assert(!a->isnull1);
1726 bucket1 = _hash_hashkey2bucket(DatumGetUInt32(a->datum1),
1727 arg->max_buckets, arg->high_mask,
1728 arg->low_mask);
1729 Assert(!b->isnull1);
1730 bucket2 = _hash_hashkey2bucket(DatumGetUInt32(b->datum1),
1731 arg->max_buckets, arg->high_mask,
1732 arg->low_mask);
1733 if (bucket1 > bucket2)
1734 return 1;
1735 else if (bucket1 < bucket2)
1736 return -1;
1737
1738 /*
1739 * If bucket values are equal, sort by hash values. This allows us to
1740 * insert directly onto bucket/overflow pages, where the index tuples are
1741 * stored in hash order to allow fast binary search within each page.
1742 */
1743 hash1 = DatumGetUInt32(a->datum1);
1744 hash2 = DatumGetUInt32(b->datum1);
1745 if (hash1 > hash2)
1746 return 1;
1747 else if (hash1 < hash2)
1748 return -1;
1749
1750 /*
1751 * If hash values are equal, we sort on ItemPointer. This does not affect
1752 * validity of the finished index, but it may be useful to have index
1753 * scans in physical order.
1754 */
1755 tuple1 = (IndexTuple) a->tuple;
1756 tuple2 = (IndexTuple) b->tuple;
1757
1758 {
1761
1762 if (blk1 != blk2)
1763 return (blk1 < blk2) ? -1 : 1;
1764 }
1765 {
1768
1769 if (pos1 != pos2)
1770 return (pos1 < pos2) ? -1 : 1;
1771 }
1772
1773 /* ItemPointer values should never be equal */
1774 Assert(false);
1775
1776 return 0;
1777}
1778
1779/*
1780 * Sorting for hash indexes only uses one sort key, so this shouldn't ever be
1781 * called. It's only here for consistency.
1782 */
1783static int
1786{
1787 Assert(false);
1788
1789 return 0;
1790}
1791
1792static void
1794{
1796 IndexTuple tuple = (IndexTuple) stup->tuple;
1797 unsigned int tuplen;
1798
1799 tuplen = IndexTupleSize(tuple) + sizeof(tuplen);
1800 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1801 LogicalTapeWrite(tape, tuple, IndexTupleSize(tuple));
1802 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1803 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1804}
1805
1806static void
1808 LogicalTape *tape, unsigned int len)
1809{
1812 unsigned int tuplen = len - sizeof(unsigned int);
1814
1815 LogicalTapeReadExact(tape, tuple, tuplen);
1816 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1817 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1818 stup->tuple = tuple;
1819 /* set up first-column key value */
1820 stup->datum1 = index_getattr(tuple,
1821 1,
1822 RelationGetDescr(arg->indexRel),
1823 &stup->isnull1);
1824}
1825
1826/*
1827 * Routines specialized for BrinTuple case
1828 */
1829
1830static void
1832{
1833 int i;
1834
1835 for (i = 0; i < count; i++)
1836 {
1837 BrinSortTuple *tuple;
1838
1839 tuple = stups[i].tuple;
1840 stups[i].datum1 = UInt32GetDatum(tuple->tuple.bt_blkno);
1841 }
1842}
1843
1844static int
1847{
1848 Assert(TuplesortstateGetPublic(state)->haveDatum1);
1849
1850 if (DatumGetUInt32(a->datum1) > DatumGetUInt32(b->datum1))
1851 return 1;
1852
1853 if (DatumGetUInt32(a->datum1) < DatumGetUInt32(b->datum1))
1854 return -1;
1855
1856 /* silence compilers */
1857 return 0;
1858}
1859
1860static void
1862{
1864 BrinSortTuple *tuple = (BrinSortTuple *) stup->tuple;
1865 unsigned int tuplen = tuple->tuplen;
1866
1867 tuplen = tuplen + sizeof(tuplen);
1868 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1869 LogicalTapeWrite(tape, &tuple->tuple, tuple->tuplen);
1870 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1871 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1872}
1873
1874static void
1876 LogicalTape *tape, unsigned int len)
1877{
1878 BrinSortTuple *tuple;
1880 unsigned int tuplen = len - sizeof(unsigned int);
1881
1882 /*
1883 * Allocate space for the BRIN sort tuple, which is BrinTuple with an
1884 * extra length field.
1885 */
1887 BRINSORTTUPLE_SIZE(tuplen));
1888
1889 tuple->tuplen = tuplen;
1890
1891 LogicalTapeReadExact(tape, &tuple->tuple, tuplen);
1892 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1893 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1894 stup->tuple = tuple;
1895
1896 /* set up first-column key value, which is block number */
1897 stup->datum1 = UInt32GetDatum(tuple->tuple.bt_blkno);
1898}
1899
1900/*
1901 * Routines specialized for GIN case
1902 */
1903
1904static void
1906{
1907 Assert(false);
1908 elog(ERROR, "removeabbrev_index_gin not implemented");
1909}
1910
1911static int
1914{
1916
1917 Assert(!TuplesortstateGetPublic(state)->haveDatum1);
1918
1919 return _gin_compare_tuples((GinTuple *) a->tuple,
1920 (GinTuple *) b->tuple,
1921 base->sortKeys);
1922}
1923
1924static void
1926{
1928 GinTuple *tuple = (GinTuple *) stup->tuple;
1929 unsigned int tuplen = tuple->tuplen;
1930
1931 tuplen = tuplen + sizeof(tuplen);
1932 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1933 LogicalTapeWrite(tape, tuple, tuple->tuplen);
1934 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1935 LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
1936}
1937
1938static void
1940 LogicalTape *tape, unsigned int len)
1941{
1942 GinTuple *tuple;
1944 unsigned int tuplen = len - sizeof(unsigned int);
1945
1946 /*
1947 * Allocate space for the GIN sort tuple, which already has the proper
1948 * length included in the header.
1949 */
1950 tuple = (GinTuple *) tuplesort_readtup_alloc(state, tuplen);
1951
1952 tuple->tuplen = tuplen;
1953
1954 LogicalTapeReadExact(tape, tuple, tuplen);
1955 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
1956 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
1957 stup->tuple = (void *) tuple;
1958
1959 /* no abbreviations (FIXME maybe use attrnum for this?) */
1960 stup->datum1 = (Datum) 0;
1961}
1962
1963/*
1964 * Routines specialized for DatumTuple case
1965 */
1966
1967static void
1969{
1970 int i;
1971
1972 for (i = 0; i < count; i++)
1973 stups[i].datum1 = PointerGetDatum(stups[i].tuple);
1974}
1975
1976static int
1978{
1980 int compare;
1981
1982 compare = ApplySortComparator(a->datum1, a->isnull1,
1983 b->datum1, b->isnull1,
1984 base->sortKeys);
1985 if (compare != 0)
1986 return compare;
1987
1989}
1990
1991static int
1993{
1995 int32 compare = 0;
1996
1997 /* if we have abbreviations, then "tuple" has the original value */
1998 if (base->sortKeys->abbrev_converter)
2000 PointerGetDatum(b->tuple), b->isnull1,
2001 base->sortKeys);
2002
2003 return compare;
2004}
2005
2006static void
2008{
2011 void *waddr;
2012 unsigned int tuplen;
2013 unsigned int writtenlen;
2014
2015 if (stup->isnull1)
2016 {
2017 waddr = NULL;
2018 tuplen = 0;
2019 }
2020 else if (!base->tuples)
2021 {
2022 waddr = &stup->datum1;
2023 tuplen = sizeof(Datum);
2024 }
2025 else
2026 {
2027 waddr = stup->tuple;
2028 tuplen = datumGetSize(PointerGetDatum(stup->tuple), false, arg->datumTypeLen);
2029 Assert(tuplen != 0);
2030 }
2031
2032 writtenlen = tuplen + sizeof(unsigned int);
2033
2034 LogicalTapeWrite(tape, &writtenlen, sizeof(writtenlen));
2035 LogicalTapeWrite(tape, waddr, tuplen);
2036 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
2037 LogicalTapeWrite(tape, &writtenlen, sizeof(writtenlen));
2038}
2039
2040static void
2042 LogicalTape *tape, unsigned int len)
2043{
2045 unsigned int tuplen = len - sizeof(unsigned int);
2046
2047 if (tuplen == 0)
2048 {
2049 /* it's NULL */
2050 stup->datum1 = (Datum) 0;
2051 stup->isnull1 = true;
2052 stup->tuple = NULL;
2053 }
2054 else if (!base->tuples)
2055 {
2056 Assert(tuplen == sizeof(Datum));
2057 LogicalTapeReadExact(tape, &stup->datum1, tuplen);
2058 stup->isnull1 = false;
2059 stup->tuple = NULL;
2060 }
2061 else
2062 {
2063 void *raddr = tuplesort_readtup_alloc(state, tuplen);
2064
2065 LogicalTapeReadExact(tape, raddr, tuplen);
2066 stup->datum1 = PointerGetDatum(raddr);
2067 stup->isnull1 = false;
2068 stup->tuple = raddr;
2069 }
2070
2071 if (base->sortopt & TUPLESORT_RANDOMACCESS) /* need trailing length word? */
2072 LogicalTapeReadExact(tape, &tuplen, sizeof(tuplen));
2073}
Definition: tuplesort.c:638
void tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple, bool useAbbrev, Size tuplen)
Definition: tuplesort.c:1165
bool tuplesort_gettuple_common(Tuplesortstate *state, bool forward, SortTuple *stup)
Definition: tuplesort.c:1466
bool trace_sort
Definition: tuplesort.c:124
void * tuplesort_readtup_alloc(Tuplesortstate *state, Size tuplen)
Definition: tuplesort.c:2877
#define TupleSortUseBumpTupleCxt(opt)
Definition: tuplesort.h:109
#define PARALLEL_SORT(coordinate)
Definition: tuplesort.h:256
#define TUPLESORT_RANDOMACCESS
Definition: tuplesort.h:97
#define LogicalTapeReadExact(tape, ptr, len)
Definition: tuplesort.h:263
#define TuplesortstateGetPublic(state)
Definition: tuplesort.h:260
IndexTuple tuplesort_getindextuple(Tuplesortstate *state, bool forward)
static void writetup_index_brin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
Tuplesortstate * tuplesort_begin_index_gin(Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
HeapTuple tuplesort_getheaptuple(Tuplesortstate *state, bool forward)
static void removeabbrev_datum(Tuplesortstate *state, SortTuple *stups, int count)
static int comparetup_index_btree_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
void tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
static int comparetup_index_btree(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
GinTuple * tuplesort_getgintuple(Tuplesortstate *state, Size *len, bool forward)
void tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel, ItemPointer self, const Datum *values, const bool *isnull)
static void readtup_index(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static int comparetup_cluster_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void writetup_index_gin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
void tuplesort_puttupleslot(Tuplesortstate *state, TupleTableSlot *slot)
static int comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
Tuplesortstate * tuplesort_begin_index_brin(int workMem, SortCoordinate coordinate, int sortopt)
Tuplesortstate * tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, AttrNumber *attNums, Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags, int workMem, SortCoordinate coordinate, int sortopt)
Tuplesortstate * tuplesort_begin_cluster(TupleDesc tupDesc, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
BrinTuple * tuplesort_getbrintuple(Tuplesortstate *state, Size *len, bool forward)
static void removeabbrev_index(Tuplesortstate *state, SortTuple *stups, int count)
Tuplesortstate * tuplesort_begin_index_btree(Relation heapRel, Relation indexRel, bool enforceUnique, bool uniqueNullsNotDistinct, int workMem, SortCoordinate coordinate, int sortopt)
static int comparetup_datum_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_datum(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
#define INDEX_SORT
static void readtup_heap(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void readtup_cluster(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int tuplen)
Tuplesortstate * tuplesort_begin_index_gist(Relation heapRel, Relation indexRel, int workMem, SortCoordinate coordinate, int sortopt)
static void writetup_datum(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
struct BrinSortTuple BrinSortTuple
#define CLUSTER_SORT
static int comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
bool tuplesort_gettupleslot(Tuplesortstate *state, bool forward, bool copy, TupleTableSlot *slot, Datum *abbrev)
static void removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
#define BRINSORTTUPLE_SIZE(len)
#define DATUM_SORT
void tuplesort_putgintuple(Tuplesortstate *state, GinTuple *tuple, Size size)
static int comparetup_index_hash(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void readtup_index_brin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
Tuplesortstate * tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, bool nullsFirstFlag, int workMem, SortCoordinate coordinate, int sortopt)
void tuplesort_putbrintuple(Tuplesortstate *state, BrinTuple *tuple, Size size)
static void writetup_heap(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void writetup_index(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void readtup_index_gin(Tuplesortstate *state, SortTuple *stup, LogicalTape *tape, unsigned int len)
static void removeabbrev_index_gin(Tuplesortstate *state, SortTuple *stups, int count)
Tuplesortstate * tuplesort_begin_index_hash(Relation heapRel, Relation indexRel, uint32 high_mask, uint32 low_mask, uint32 max_buckets, int workMem, SortCoordinate coordinate, int sortopt)
static int comparetup_heap_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static void freestate_cluster(Tuplesortstate *state)
static void removeabbrev_heap(Tuplesortstate *state, SortTuple *stups, int count)
void tuplesort_putheaptuple(Tuplesortstate *state, HeapTuple tup)
static int comparetup_cluster(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_brin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static int comparetup_index_hash_tiebreak(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
bool tuplesort_getdatum(Tuplesortstate *state, bool forward, bool copy, Datum *val, bool *isNull, Datum *abbrev)
static void writetup_cluster(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
static void removeabbrev_cluster(Tuplesortstate *state, SortTuple *stups, int count)
#define HEAP_SORT
static int comparetup_index_gin(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
static MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot *slot)
Definition: tuptable.h:496
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:458
TypeCacheEntry * lookup_type_cache(Oid type_id, int flags)
Definition: typcache.c:386
#define TYPECACHE_LT_OPR
Definition: typcache.h:139