PostgreSQL Source Code (git master)
array_typanalyze.c

/*-------------------------------------------------------------------------
 *
 * array_typanalyze.c
 *    Functions for gathering statistics from array columns
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/utils/adt/array_typanalyze.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/detoast.h"
#include "commands/vacuum.h"
#include "utils/array.h"
#include "utils/datum.h"
#include "utils/fmgrprotos.h"
#include "utils/lsyscache.h"
#include "utils/typcache.h"


/*
 * To avoid consuming too much memory, IO and CPU load during analysis, and/or
 * too much space in the resulting pg_statistic rows, we ignore arrays that
 * are wider than ARRAY_WIDTH_THRESHOLD (after detoasting!).  Note that this
 * number is considerably more than the similar WIDTH_THRESHOLD limit used
 * in analyze.c's standard typanalyze code.
 */
#define ARRAY_WIDTH_THRESHOLD 0x10000
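
/*
 * (For scale, an illustrative note: 0x10000 bytes is 64kB; analyze.c's
 * WIDTH_THRESHOLD, by comparison, is 1024 bytes.)
 */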

/* Extra data for compute_array_stats function */
typedef struct
{
    /* Information about array element type */
    Oid         type_id;        /* element type's OID */
    Oid         eq_opr;         /* default equality operator's OID */
    Oid         coll_id;        /* collation to use */
    bool        typbyval;       /* physical properties of element type */
    int16       typlen;
    char        typalign;

    /*
     * Lookup data for element type's comparison and hash functions (these are
     * in the type's typcache entry, which we expect to remain valid over the
     * lifespan of the ANALYZE run)
     */
    FmgrInfo   *cmp;
    FmgrInfo   *hash;

    /* Saved state from std_typanalyze() */
    AnalyzeAttrComputeStatsFunc std_compute_stats;
    void       *std_extra_data;
} ArrayAnalyzeExtraData;

/*
 * While compute_array_stats is running, we keep a pointer to the extra data
 * here for use by assorted subroutines.  compute_array_stats doesn't
 * currently need to be re-entrant, so avoiding this is not worth the extra
 * notational cruft that would be needed.
 */
static ArrayAnalyzeExtraData *array_extra_data;

/* A hash table entry for the Lossy Counting algorithm */
typedef struct
{
    Datum       key;            /* This is 'e' from the LC algorithm. */
    int         frequency;      /* This is 'f'. */
    int         delta;          /* And this is 'delta'. */
    int         last_container; /* For de-duplication of array elements. */
} TrackItem;

/* A hash table entry for distinct-elements counts */
typedef struct
{
    int         count;          /* Count of distinct elements in an array */
    int         frequency;      /* Number of arrays seen with this count */
} DECountItem;

static void compute_array_stats(VacAttrStats *stats,
                                AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows);
static void prune_element_hashtable(HTAB *elements_tab, int b_current);
static uint32 element_hash(const void *key, Size keysize);
static int  element_match(const void *key1, const void *key2, Size keysize);
static int  element_compare(const void *key1, const void *key2);
static int  trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg);
static int  trackitem_compare_element(const void *e1, const void *e2, void *arg);
static int  countitem_compare_count(const void *e1, const void *e2, void *arg);

/*
 * array_typanalyze -- typanalyze function for array columns
 */
Datum
array_typanalyze(PG_FUNCTION_ARGS)
{
    VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);
    Oid         element_typeid;
    TypeCacheEntry *typentry;
    ArrayAnalyzeExtraData *extra_data;

    /*
     * Call the standard typanalyze function.  It may fail to find needed
     * operators, in which case we also can't do anything, so just fail.
     */
    if (!std_typanalyze(stats))
        PG_RETURN_BOOL(false);

    /*
     * Check attribute data type is a varlena array (or a domain over one).
     */
    element_typeid = get_base_element_type(stats->attrtypid);
    if (!OidIsValid(element_typeid))
        elog(ERROR, "array_typanalyze was invoked for non-array type %u",
             stats->attrtypid);

    /*
     * Gather information about the element type.  If we fail to find
     * something, return leaving the state from std_typanalyze() in place.
     */
    typentry = lookup_type_cache(element_typeid,
                                 TYPECACHE_EQ_OPR |
                                 TYPECACHE_CMP_PROC_FINFO |
                                 TYPECACHE_HASH_PROC_FINFO);

    if (!OidIsValid(typentry->eq_opr) ||
        !OidIsValid(typentry->cmp_proc_finfo.fn_oid) ||
        !OidIsValid(typentry->hash_proc_finfo.fn_oid))
        PG_RETURN_BOOL(true);

    /* Store our findings for use by compute_array_stats() */
    extra_data = (ArrayAnalyzeExtraData *) palloc(sizeof(ArrayAnalyzeExtraData));
    extra_data->type_id = typentry->type_id;
    extra_data->eq_opr = typentry->eq_opr;
    extra_data->coll_id = stats->attrcollid;    /* collation we should use */
    extra_data->typbyval = typentry->typbyval;
    extra_data->typlen = typentry->typlen;
    extra_data->typalign = typentry->typalign;
    extra_data->cmp = &typentry->cmp_proc_finfo;
    extra_data->hash = &typentry->hash_proc_finfo;

    /* Save old compute_stats and extra_data for scalar statistics ... */
    extra_data->std_compute_stats = stats->compute_stats;
    extra_data->std_extra_data = stats->extra_data;

    /* ... and replace with our info */
    stats->compute_stats = compute_array_stats;
    stats->extra_data = extra_data;

    /*
     * Note we leave stats->minrows set as std_typanalyze set it.  Should it
     * be increased for array analysis purposes?
     */

    PG_RETURN_BOOL(true);
}
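
/*
 * For illustration (a sketch, not part of the upstream comments): array
 * types reach this function through the typanalyze column of pg_type, e.g.
 *
 *      SELECT typname FROM pg_type
 *      WHERE typanalyze = 'array_typanalyze'::regproc;
 *
 * lists the types whose columns ANALYZE will process with the code above.
 */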

/*
 * compute_array_stats() -- compute statistics for an array column
 *
 * This function computes statistics useful for determining selectivity of
 * the array operators <@, &&, and @>.  It is invoked by ANALYZE via the
 * compute_stats hook after sample rows have been collected.
 *
 * We also invoke the standard compute_stats function, which will compute
 * "scalar" statistics relevant to the btree-style array comparison operators.
 * However, exact duplicates of an entire array may be rare despite many
 * arrays sharing individual elements.  This especially afflicts long arrays,
 * which are also liable to lack all scalar statistics due to the low
 * WIDTH_THRESHOLD used in analyze.c.  So, in addition to the standard stats,
 * we find the most common array elements and compute a histogram of distinct
 * element counts.
 *
 * The algorithm used is Lossy Counting, as proposed in the paper "Approximate
 * frequency counts over data streams" by G. S. Manku and R. Motwani, in
 * Proceedings of the 28th International Conference on Very Large Data Bases,
 * Hong Kong, China, August 2002, section 4.2. The paper is available at
 * http://www.vldb.org/conf/2002/S10P03.pdf
 *
 * The Lossy Counting (aka LC) algorithm goes like this:
 * Let s be the threshold frequency for an item (the minimum frequency we
 * are interested in) and epsilon the error margin for the frequency.  Let D
 * be a set of triples (e, f, delta), where e is an element value, f is that
 * element's frequency (actually, its current occurrence count) and delta is
 * the maximum error in f.  We start with D empty and process the elements in
 * batches of size w.  (The batch size is also known as "bucket size" and is
 * equal to 1/epsilon.)  Let the current batch number be b_current, starting
 * with 1.  For each element e we either increment its f count, if it's
 * already in D, or insert a new triple into D with values (e, 1, b_current
 * - 1).  After processing each batch we prune D, by removing from it all
 * elements with f + delta <= b_current.  After the algorithm finishes we
 * suppress all elements from D that do not satisfy f >= (s - epsilon) * N,
 * where N is the total number of elements in the input.  We emit the
 * remaining elements with estimated frequency f/N.  The LC paper proves
 * that this algorithm finds all elements with true frequency at least s,
 * and that no frequency is overestimated or is underestimated by more than
 * epsilon.  Furthermore, given reasonable assumptions about the input
 * distribution, the required table size is no more than about 7 times w.
 *
 * In the absence of a principled basis for other particular values, we
 * follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
 * But we leave out the correction for stopwords, which do not apply to
 * arrays.  These parameters give bucket width w = K/0.007 and maximum
 * expected hashtable size of about 1000 * K.
 *
 * Elements may repeat within an array.  Since duplicates do not change the
 * behavior of <@, && or @>, we want to count each element only once per
 * array.  Therefore, we store in the finished pg_statistic entry each
 * element's frequency as the fraction of all non-null rows that contain it.
 * We divide the raw counts by nonnull_cnt to get those figures.
 */
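
/*
 * Worked LC example (illustrative, with small numbers): take w = 4, so
 * epsilon = 0.25, and the element stream  a a b c | a a d e.  After the
 * first bucket (b_current = 1), D = {(a,2,0), (b,1,0), (c,1,0)}; pruning
 * removes b and c, since for them f + delta = 1 <= 1.  After the second
 * bucket, D = {(a,4,0), (d,1,1), (e,1,1)}; d and e are pruned since
 * 1 + 1 <= 2.  Only the genuinely frequent element a survives, here with
 * its exact count (in general f can undercount by at most epsilon * N).
 */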
static void
compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
                    int samplerows, double totalrows)
{
    ArrayAnalyzeExtraData *extra_data;
    int         num_mcelem;
    int         null_elem_cnt = 0;
    int         analyzed_rows = 0;

    /* This is D from the LC algorithm. */
    HTAB       *elements_tab;
    HASHCTL     elem_hash_ctl;
    HASH_SEQ_STATUS scan_status;

    /* This is the current bucket number from the LC algorithm */
    int         b_current;

    /* This is 'w' from the LC algorithm */
    int         bucket_width;
    int         array_no;
    int64       element_no;
    TrackItem  *item;
    int         slot_idx;
    HTAB       *count_tab;
    HASHCTL     count_hash_ctl;
    DECountItem *count_item;

    extra_data = (ArrayAnalyzeExtraData *) stats->extra_data;

    /*
     * Invoke analyze.c's standard analysis function to create scalar-style
     * stats for the column.  It will expect its own extra_data pointer, so
     * temporarily install that.
     */
    stats->extra_data = extra_data->std_extra_data;
    extra_data->std_compute_stats(stats, fetchfunc, samplerows, totalrows);
    stats->extra_data = extra_data;

    /*
     * Set up static pointer for use by subroutines.  We wait till here in
     * case std_compute_stats somehow recursively invokes us (probably not
     * possible, but ...)
     */
    array_extra_data = extra_data;

    /*
     * We want statistics_target * 10 elements in the MCELEM array.  This
     * multiplier is pretty arbitrary, but is meant to reflect the fact that
     * the number of individual elements tracked in pg_statistic ought to be
     * more than the number of values for a simple scalar column.
     */
    num_mcelem = stats->attstattarget * 10;

    /*
     * We set bucket width equal to num_mcelem / 0.007 as per the comment
     * above.
     */
    bucket_width = num_mcelem * 1000 / 7;
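
    /*
     * Illustrative numbers (not in the upstream comments): with the default
     * statistics target of 100, num_mcelem = 1000 and bucket_width =
     * 1000 * 1000 / 7 = 142857, so the tracking hashtable is pruned about
     * once per 143000 elements processed.
     */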

    /*
     * Create the hashtable.  It will be in local memory, so we don't need to
     * worry about overflowing the initial size.  Also we don't need to pay any
     * attention to locking and memory management.
     */
    elem_hash_ctl.keysize = sizeof(Datum);
    elem_hash_ctl.entrysize = sizeof(TrackItem);
    elem_hash_ctl.hash = element_hash;
    elem_hash_ctl.match = element_match;
    elem_hash_ctl.hcxt = CurrentMemoryContext;
    elements_tab = hash_create("Analyzed elements table",
                               num_mcelem,
                               &elem_hash_ctl,
                               HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);

    /* hashtable for array distinct elements counts */
    count_hash_ctl.keysize = sizeof(int);
    count_hash_ctl.entrysize = sizeof(DECountItem);
    count_hash_ctl.hcxt = CurrentMemoryContext;
    count_tab = hash_create("Array distinct element count table",
                            64,
                            &count_hash_ctl,
                            HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

    /* Initialize counters. */
    b_current = 1;
    element_no = 0;

    /* Loop over the arrays. */
    for (array_no = 0; array_no < samplerows; array_no++)
    {
        Datum       value;
        bool        isnull;
        ArrayType  *array;
        int         num_elems;
        Datum      *elem_values;
        bool       *elem_nulls;
        bool        null_present;
        int         j;
        int64       prev_element_no = element_no;
        int         distinct_count;
        bool        count_item_found;

        vacuum_delay_point(true);

        value = fetchfunc(stats, array_no, &isnull);
        if (isnull)
        {
            /* ignore arrays that are null overall */
            continue;
        }

        /* Skip too-large values. */
        if (toast_raw_datum_size(value) > ARRAY_WIDTH_THRESHOLD)
            continue;
        else
            analyzed_rows++;

        /*
         * Now detoast the array if needed, and deconstruct into datums.
         */
        array = DatumGetArrayTypeP(value);

        Assert(ARR_ELEMTYPE(array) == extra_data->type_id);
        deconstruct_array(array,
                          extra_data->type_id,
                          extra_data->typlen,
                          extra_data->typbyval,
                          extra_data->typalign,
                          &elem_values, &elem_nulls, &num_elems);

        /*
         * We loop through the elements in the array and add them to our
         * tracking hashtable.
         */
        null_present = false;
        for (j = 0; j < num_elems; j++)
        {
            Datum       elem_value;
            bool        found;

            /* No null element processing other than flag setting here */
            if (elem_nulls[j])
            {
                null_present = true;
                continue;
            }

            /* Lookup current element in hashtable, adding it if new */
            elem_value = elem_values[j];
            item = (TrackItem *) hash_search(elements_tab,
                                             &elem_value,
                                             HASH_ENTER, &found);

            if (found)
            {
                /* The element value is already on the tracking list */

                /*
                 * The operators we assist ignore duplicate array elements, so
                 * count a given distinct element only once per array.
                 */
                if (item->last_container == array_no)
                    continue;

                item->frequency++;
                item->last_container = array_no;
            }
            else
            {
                /* Initialize new tracking list element */

                /*
                 * If element type is pass-by-reference, we must copy it into
                 * palloc'd space, so that we can release the array below. (We
                 * do this so that the space needed for element values is
                 * limited by the size of the hashtable; if we kept all the
                 * array values around, it could be much more.)
                 */
                item->key = datumCopy(elem_value,
                                      extra_data->typbyval,
                                      extra_data->typlen);

                item->frequency = 1;
                item->delta = b_current - 1;
                item->last_container = array_no;
            }

            /* element_no is the number of elements processed (ie N) */
            element_no++;

            /* We prune the D structure after processing each bucket */
            if (element_no % bucket_width == 0)
            {
                prune_element_hashtable(elements_tab, b_current);
                b_current++;
            }
        }

        /* Count null element presence once per array. */
        if (null_present)
            null_elem_cnt++;

        /* Update frequency of the particular array distinct element count. */
        distinct_count = (int) (element_no - prev_element_no);
        count_item = (DECountItem *) hash_search(count_tab, &distinct_count,
                                                 HASH_ENTER,
                                                 &count_item_found);

        if (count_item_found)
            count_item->frequency++;
        else
            count_item->frequency = 1;

        /* Free memory allocated while detoasting. */
        if (PointerGetDatum(array) != value)
            pfree(array);
        pfree(elem_values);
        pfree(elem_nulls);
    }

    /* Skip pg_statistic slots occupied by standard statistics */
    slot_idx = 0;
    while (slot_idx < STATISTIC_NUM_SLOTS && stats->stakind[slot_idx] != 0)
        slot_idx++;
    if (slot_idx > STATISTIC_NUM_SLOTS - 2)
        elog(ERROR, "insufficient pg_statistic slots for array stats");

    /* We can only compute real stats if we found some non-null values. */
    if (analyzed_rows > 0)
    {
        int         nonnull_cnt = analyzed_rows;
        int         count_items_count;
        int         i;
        TrackItem **sort_table;
        int         track_len;
        int64       cutoff_freq;
        int64       minfreq,
                    maxfreq;

        /*
         * We assume the standard stats code already took care of setting
         * stats_valid, stanullfrac, stawidth, stadistinct.  We'd have to
         * re-compute those values if we wanted to not store the standard
         * stats.
         */

        /*
         * Construct an array of the interesting hashtable items, that is,
         * those meeting the cutoff frequency (s - epsilon)*N.  Also identify
         * the maximum frequency among these items.
         *
         * Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
         * frequency is 9*N / bucket_width.
         */
        cutoff_freq = 9 * element_no / bucket_width;
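
        /*
         * Continuing the illustrative numbers above: with element_no =
         * 1000000 and bucket_width = 142857, cutoff_freq = 9 * 1000000 /
         * 142857 = 63, so only elements appearing in more than 63 sampled
         * arrays can make it into the MCELEM list.
         */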

        i = hash_get_num_entries(elements_tab); /* surely enough space */
        sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);

        hash_seq_init(&scan_status, elements_tab);
        track_len = 0;
        maxfreq = 0;
        while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
        {
            if (item->frequency > cutoff_freq)
            {
                sort_table[track_len++] = item;
                maxfreq = Max(maxfreq, item->frequency);
            }
        }
        Assert(track_len <= i);

        /* emit some statistics for debug purposes */
        elog(DEBUG3, "compute_array_stats: target # mces = %d, "
             "bucket width = %d, "
             "# elements = " INT64_FORMAT ", hashtable size = %d, "
             "usable entries = %d",
             num_mcelem, bucket_width, element_no, i, track_len);

        /*
         * If we obtained more elements than we really want, get rid of those
         * with least frequencies.  The easiest way is to qsort the array into
         * descending frequency order and truncate the array.
         *
         * If we did not find more elements than we want, then it is safe to
         * assume that the stored MCE array will contain every element with
         * frequency above the cutoff.  In that case, rather than storing the
         * smallest frequency we are keeping, we want to store the minimum
         * frequency that would have been accepted as a valid MCE.  The
         * selectivity functions can assume that that is an upper bound on the
         * frequency of elements not present in the array.
         *
         * If we found no candidate MCEs at all, we still want to record the
         * cutoff frequency, since it's still valid to assume that no element
         * has frequency more than that.
         */
        if (num_mcelem < track_len)
        {
            qsort_interruptible(sort_table, track_len, sizeof(TrackItem *),
                                trackitem_compare_frequencies_desc, NULL);
            /* set minfreq to the smallest frequency we're keeping */
            minfreq = sort_table[num_mcelem - 1]->frequency;
        }
        else
        {
            num_mcelem = track_len;
            /* set minfreq to the minimum frequency above the cutoff */
            minfreq = cutoff_freq + 1;
            /* ensure maxfreq is nonzero, too */
            if (track_len == 0)
                maxfreq = minfreq;
        }

        /* Generate MCELEM slot entry */
        if (num_mcelem >= 0)
        {
            MemoryContext old_context;
            Datum      *mcelem_values;
            float4     *mcelem_freqs;

            /*
             * We want to store statistics sorted on the element value using
             * the element type's default comparison function.  This permits
             * fast binary searches in selectivity estimation functions.
             */
            qsort_interruptible(sort_table, num_mcelem, sizeof(TrackItem *),
                                trackitem_compare_element, NULL);

            /* Must copy the target values into anl_context */
            old_context = MemoryContextSwitchTo(stats->anl_context);

            /*
             * We sorted statistics on the element value, but we want to be
             * able to find the minimal and maximal frequencies without going
             * through all the values.  We also want the frequency of null
             * elements.  Store these three values at the end of mcelem_freqs.
             */
            mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
            mcelem_freqs = (float4 *) palloc((num_mcelem + 3) * sizeof(float4));

            /*
             * See comments above about use of nonnull_cnt as the divisor for
             * the final frequency estimates.
             */
            for (i = 0; i < num_mcelem; i++)
            {
                TrackItem  *titem = sort_table[i];

                mcelem_values[i] = datumCopy(titem->key,
                                             extra_data->typbyval,
                                             extra_data->typlen);
                mcelem_freqs[i] = (double) titem->frequency /
                    (double) nonnull_cnt;
            }
            mcelem_freqs[i++] = (double) minfreq / (double) nonnull_cnt;
            mcelem_freqs[i++] = (double) maxfreq / (double) nonnull_cnt;
            mcelem_freqs[i++] = (double) null_elem_cnt / (double) nonnull_cnt;
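
            /*
             * Illustrative layout: with num_mcelem = 3 the finished array is
             *   [f0, f1, f2, minfreq/N, maxfreq/N, nullfreq]
             * where N = nonnull_cnt and f0..f2 are the fractions of non-null
             * rows containing each stored element.
             */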

            MemoryContextSwitchTo(old_context);

            stats->stakind[slot_idx] = STATISTIC_KIND_MCELEM;
            stats->staop[slot_idx] = extra_data->eq_opr;
            stats->stacoll[slot_idx] = extra_data->coll_id;
            stats->stanumbers[slot_idx] = mcelem_freqs;
            /* See above comment about extra stanumber entries */
            stats->numnumbers[slot_idx] = num_mcelem + 3;
            stats->stavalues[slot_idx] = mcelem_values;
            stats->numvalues[slot_idx] = num_mcelem;
            /* We are storing values of element type */
            stats->statypid[slot_idx] = extra_data->type_id;
            stats->statyplen[slot_idx] = extra_data->typlen;
            stats->statypbyval[slot_idx] = extra_data->typbyval;
            stats->statypalign[slot_idx] = extra_data->typalign;
            slot_idx++;
        }

        /* Generate DECHIST slot entry */
        count_items_count = hash_get_num_entries(count_tab);
        if (count_items_count > 0)
        {
            int         num_hist = stats->attstattarget;
            DECountItem **sorted_count_items;
            int         j;
            int         delta;
            int64       frac;
            float4     *hist;

            /* num_hist must be at least 2 for the loop below to work */
            num_hist = Max(num_hist, 2);

            /*
             * Create an array of DECountItem pointers, and sort them into
             * increasing count order.
             */
            sorted_count_items = (DECountItem **)
                palloc(sizeof(DECountItem *) * count_items_count);
            hash_seq_init(&scan_status, count_tab);
            j = 0;
            while ((count_item = (DECountItem *) hash_seq_search(&scan_status)) != NULL)
            {
                sorted_count_items[j++] = count_item;
            }
            qsort_interruptible(sorted_count_items, count_items_count,
                                sizeof(DECountItem *),
                                countitem_compare_count, NULL);

            /*
             * Prepare to fill stanumbers with the histogram, followed by the
             * average count.  This array must be stored in anl_context.
             */
            hist = (float4 *)
                MemoryContextAlloc(stats->anl_context,
                                   sizeof(float4) * (num_hist + 1));
            hist[num_hist] = (double) element_no / (double) nonnull_cnt;

            /*----------
             * Construct the histogram of distinct-element counts (DECs).
             *
             * The object of this loop is to copy the min and max DECs to
             * hist[0] and hist[num_hist - 1], along with evenly-spaced DECs
             * in between (where "evenly-spaced" is with reference to the
             * whole input population of arrays).  If we had a complete sorted
             * array of DECs, one per analyzed row, the i'th hist value would
             * come from DECs[i * (analyzed_rows - 1) / (num_hist - 1)]
             * (compare the histogram-making loop in compute_scalar_stats()).
             * But instead of that we have the sorted_count_items[] array,
             * which holds unique DEC values with their frequencies (that is,
             * a run-length-compressed version of the full array).  So we
             * control advancing through sorted_count_items[] with the
             * variable "frac", which is defined as (x - y) * (num_hist - 1),
             * where x is the index in the notional DECs array corresponding
             * to the start of the next sorted_count_items[] element's run,
             * and y is the index in DECs from which we should take the next
             * histogram value.  We have to advance whenever x <= y, that is
             * frac <= 0.  The x component is the sum of the frequencies seen
             * so far (up through the current sorted_count_items[] element),
             * and of course y * (num_hist - 1) = i * (analyzed_rows - 1),
             * per the subscript calculation above.  (The subscript calculation
             * implies dropping any fractional part of y; in this formulation
             * that's handled by not advancing until frac reaches 1.)
             *
             * Even though frac has a bounded range, it could overflow int32
             * when working with very large statistics targets, so we do that
             * math in int64.
             *----------
             */
            delta = analyzed_rows - 1;
            j = 0;              /* current index in sorted_count_items */
            /* Initialize frac for sorted_count_items[0]; y is initially 0 */
            frac = (int64) sorted_count_items[0]->frequency * (num_hist - 1);
            for (i = 0; i < num_hist; i++)
            {
                while (frac <= 0)
                {
                    /* Advance, and update x component of frac */
                    j++;
                    frac += (int64) sorted_count_items[j]->frequency * (num_hist - 1);
                }
                hist[i] = sorted_count_items[j]->count;
                frac -= delta;  /* update y for upcoming i increment */
            }
            Assert(j == count_items_count - 1);
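
            /*
             * Worked example (illustrative): with analyzed_rows = 4,
             * num_hist = 3, and sorted_count_items = [(count 2, freq 3),
             * (count 5, freq 1)], the notional DECs array is [2, 2, 2, 5].
             * Then delta = 3 and frac starts at 3 * 2 = 6; the loop emits
             * hist[0] = 2 (frac -> 3), hist[1] = 2 (frac -> 0), advances to
             * the second item (frac -> 2), and emits hist[2] = 5, matching
             * DECs[i * 3 / 2] for i = 0..2.
             */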

            stats->stakind[slot_idx] = STATISTIC_KIND_DECHIST;
            stats->staop[slot_idx] = extra_data->eq_opr;
            stats->stacoll[slot_idx] = extra_data->coll_id;
            stats->stanumbers[slot_idx] = hist;
            stats->numnumbers[slot_idx] = num_hist + 1;
            slot_idx++;
        }
    }

    /*
     * We don't need to bother cleaning up any of our temporary palloc's.  The
     * hashtable should also go away, as it used a child memory context.
     */
}

/*
 * A function to prune the D structure from the Lossy Counting algorithm.
 * Consult compute_tsvector_stats() for wider explanation.
 */
static void
prune_element_hashtable(HTAB *elements_tab, int b_current)
{
    HASH_SEQ_STATUS scan_status;
    TrackItem  *item;

    hash_seq_init(&scan_status, elements_tab);
    while ((item = (TrackItem *) hash_seq_search(&scan_status)) != NULL)
    {
        if (item->frequency + item->delta <= b_current)
        {
            Datum       value = item->key;

            if (hash_search(elements_tab, &item->key,
                            HASH_REMOVE, NULL) == NULL)
                elog(ERROR, "hash table corrupted");
            /* We should free memory if element is not passed by value */
            if (!array_extra_data->typbyval)
                pfree(DatumGetPointer(value));
        }
    }
}
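
/*
 * Pruning example (illustrative): an element first seen during bucket 3 is
 * inserted with delta = 2.  At the prune following bucket 5 (b_current = 5)
 * it is removed unless frequency + 2 > 5, i.e. unless it has been counted
 * at least 4 times by then.
 */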

/*
 * Hash function for elements.
 *
 * We use the element type's default hash opclass, and the column collation
 * if the type is collation-sensitive.
 */
static uint32
element_hash(const void *key, Size keysize)
{
    Datum       d = *((const Datum *) key);
    Datum       h;

    h = FunctionCall1Coll(array_extra_data->hash,
                          array_extra_data->coll_id,
                          d);
    return DatumGetUInt32(h);
}

/*
 * Matching function for elements, to be used in hashtable lookups.
 */
static int
element_match(const void *key1, const void *key2, Size keysize)
{
    /* The keysize parameter is superfluous here */
    return element_compare(key1, key2);
}

/*
 * Comparison function for elements.
 *
 * We use the element type's default btree opclass, and the column collation
 * if the type is collation-sensitive.
 *
 * XXX consider using SortSupport infrastructure
 */
static int
element_compare(const void *key1, const void *key2)
{
    Datum       d1 = *((const Datum *) key1);
    Datum       d2 = *((const Datum *) key2);
    Datum       c;

    c = FunctionCall2Coll(array_extra_data->cmp,
                          array_extra_data->coll_id,
                          d1, d2);
    return DatumGetInt32(c);
}

/*
 * Comparator for sorting TrackItems by frequencies (descending sort)
 */
static int
trackitem_compare_frequencies_desc(const void *e1, const void *e2, void *arg)
{
    const TrackItem *const *t1 = (const TrackItem *const *) e1;
    const TrackItem *const *t2 = (const TrackItem *const *) e2;

    return (*t2)->frequency - (*t1)->frequency;
}

/*
 * Comparator for sorting TrackItems by element values
 */
static int
trackitem_compare_element(const void *e1, const void *e2, void *arg)
{
    const TrackItem *const *t1 = (const TrackItem *const *) e1;
    const TrackItem *const *t2 = (const TrackItem *const *) e2;

    return element_compare(&(*t1)->key, &(*t2)->key);
}

/*
 * Comparator for sorting DECountItems by count
 */
static int
countitem_compare_count(const void *e1, const void *e2, void *arg)
{
    const DECountItem *const *t1 = (const DECountItem *const *) e1;
    const DECountItem *const *t2 = (const DECountItem *const *) e2;

    if ((*t1)->count < (*t2)->count)
        return -1;
    else if ((*t1)->count == (*t2)->count)
        return 0;
    else
        return 1;
}