Thanks to visit codestin.com
Credit goes to doxygen.postgresql.org

PostgreSQL Source Code git master
planner.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * planner.c
4 * The query optimizer external interface.
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/optimizer/plan/planner.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16#include "postgres.h"
17
18#include <limits.h>
19#include <math.h>
20
21#include "access/genam.h"
22#include "access/parallel.h"
23#include "access/sysattr.h"
24#include "access/table.h"
26#include "catalog/pg_inherits.h"
27#include "catalog/pg_proc.h"
28#include "catalog/pg_type.h"
29#include "executor/executor.h"
30#include "foreign/fdwapi.h"
31#include "jit/jit.h"
32#include "lib/bipartite_match.h"
33#include "lib/knapsack.h"
34#include "miscadmin.h"
35#include "nodes/makefuncs.h"
36#include "nodes/nodeFuncs.h"
37#ifdef OPTIMIZER_DEBUG
38#include "nodes/print.h"
39#endif
40#include "nodes/supportnodes.h"
42#include "optimizer/clauses.h"
43#include "optimizer/cost.h"
44#include "optimizer/optimizer.h"
46#include "optimizer/pathnode.h"
47#include "optimizer/paths.h"
48#include "optimizer/plancat.h"
49#include "optimizer/planmain.h"
50#include "optimizer/planner.h"
51#include "optimizer/prep.h"
52#include "optimizer/subselect.h"
53#include "optimizer/tlist.h"
54#include "parser/analyze.h"
55#include "parser/parse_agg.h"
56#include "parser/parse_clause.h"
58#include "parser/parsetree.h"
61#include "utils/acl.h"
63#include "utils/lsyscache.h"
64#include "utils/rel.h"
65#include "utils/selfuncs.h"
66
67/* GUC parameters */
72
73/* Hook for plugins to get control in planner() */
75
76/* Hook for plugins to get control when grouping_planner() plans upper rels */
78
79
80/* Expression kind codes for preprocess_expression */
81#define EXPRKIND_QUAL 0
82#define EXPRKIND_TARGET 1
83#define EXPRKIND_RTFUNC 2
84#define EXPRKIND_RTFUNC_LATERAL 3
85#define EXPRKIND_VALUES 4
86#define EXPRKIND_VALUES_LATERAL 5
87#define EXPRKIND_LIMIT 6
88#define EXPRKIND_APPINFO 7
89#define EXPRKIND_PHV 8
90#define EXPRKIND_TABLESAMPLE 9
91#define EXPRKIND_ARBITER_ELEM 10
92#define EXPRKIND_TABLEFUNC 11
93#define EXPRKIND_TABLEFUNC_LATERAL 12
94#define EXPRKIND_GROUPEXPR 13
95
96/*
97 * Data specific to grouping sets
98 */
99typedef struct
100{
110
111/*
112 * Temporary structure for use during WindowClause reordering in order to be
113 * able to sort WindowClauses on partitioning/ordering prefix.
114 */
115typedef struct
116{
118 List *uniqueOrder; /* A List of unique ordering/partitioning
119 * clauses per Window */
121
122/* Passthrough data for standard_qp_callback */
123typedef struct
124{
125 List *activeWindows; /* active windows, if any */
126 grouping_sets_data *gset_data; /* grouping sets data, if any */
127 SetOperationStmt *setop; /* parent set operation or NULL if not a
128 * subquery belonging to a set operation */
130
131/* Local functions */
132static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
133static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
134static void grouping_planner(PlannerInfo *root, double tuple_fraction,
135 SetOperationStmt *setops);
137static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
138 int *tleref_to_colnum_map);
140static double preprocess_limit(PlannerInfo *root,
141 double tuple_fraction,
142 int64 *offset_est, int64 *count_est);
144static List *extract_rollup_sets(List *groupingSets);
145static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
146static void standard_qp_callback(PlannerInfo *root, void *extra);
148 double path_rows,
150 List *target_list);
152 RelOptInfo *input_rel,
153 PathTarget *target,
154 bool target_parallel_safe,
158 RelOptInfo *input_rel,
159 RelOptInfo *grouped_rel);
161 PathTarget *target, bool target_parallel_safe,
162 Node *havingQual);
164 RelOptInfo *input_rel,
165 RelOptInfo *grouped_rel,
166 const AggClauseCosts *agg_costs,
168 GroupPathExtraData *extra,
169 RelOptInfo **partially_grouped_rel_p);
171 RelOptInfo *grouped_rel,
172 Path *path,
173 bool is_sorted,
174 bool can_hash,
176 const AggClauseCosts *agg_costs,
177 double dNumGroups);
179 RelOptInfo *input_rel,
180 PathTarget *input_target,
181 PathTarget *output_target,
182 bool output_target_parallel_safe,
183 WindowFuncLists *wflists,
184 List *activeWindows);
186 RelOptInfo *window_rel,
187 Path *path,
188 PathTarget *input_target,
189 PathTarget *output_target,
190 WindowFuncLists *wflists,
191 List *activeWindows);
193 RelOptInfo *input_rel,
194 PathTarget *target);
196 RelOptInfo *input_rel,
197 RelOptInfo *final_distinct_rel,
198 PathTarget *target);
200 RelOptInfo *input_rel,
201 RelOptInfo *distinct_rel);
203 List *needed_pathkeys,
204 List *path_pathkeys);
206 RelOptInfo *input_rel,
207 PathTarget *target,
208 bool target_parallel_safe,
209 double limit_tuples);
211 PathTarget *final_target);
213 PathTarget *grouping_target,
214 Node *havingQual);
215static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
217 WindowFuncLists *wflists);
219static void name_active_windows(List *activeWindows);
221 PathTarget *final_target,
222 List *activeWindows);
224 List *tlist);
226 PathTarget *final_target,
227 bool *have_postponed_srfs);
229 List *targets, List *targets_contain_srfs);
231 RelOptInfo *grouped_rel,
232 RelOptInfo *partially_grouped_rel,
233 const AggClauseCosts *agg_costs,
235 double dNumGroups,
236 GroupPathExtraData *extra);
238 RelOptInfo *grouped_rel,
239 RelOptInfo *input_rel,
241 GroupPathExtraData *extra,
242 bool force_rel_creation);
244 RelOptInfo *rel,
245 Path *path,
246 Path *cheapest_path,
247 List *pathkeys,
248 double limit_tuples);
250static bool can_partial_agg(PlannerInfo *root);
252 RelOptInfo *rel,
253 List *scanjoin_targets,
254 List *scanjoin_targets_contain_srfs,
255 bool scanjoin_target_parallel_safe,
256 bool tlist_same_exprs);
258 RelOptInfo *input_rel,
259 RelOptInfo *grouped_rel,
260 RelOptInfo *partially_grouped_rel,
261 const AggClauseCosts *agg_costs,
264 GroupPathExtraData *extra);
265static bool group_by_has_partkey(RelOptInfo *input_rel,
266 List *targetList,
267 List *groupClause);
268static int common_prefix_cmp(const void *a, const void *b);
270 List *targetlist);
272 List *sortPathkeys, List *groupClause,
273 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
275 List *sortPathkeys, List *groupClause,
276 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
277
278
279/*****************************************************************************
280 *
281 * Query optimizer entry point
282 *
283 * To support loadable plugins that monitor or modify planner behavior,
284 * we provide a hook variable that lets a plugin get control before and
285 * after the standard planning process. The plugin would normally call
286 * standard_planner().
287 *
288 * Note to plugin authors: standard_planner() scribbles on its Query input,
289 * so you'd better copy that data structure if you want to plan more than once.
290 *
291 *****************************************************************************/
293planner(Query *parse, const char *query_string, int cursorOptions,
294 ParamListInfo boundParams)
295{
296 PlannedStmt *result;
297
298 if (planner_hook)
299 result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
300 else
301 result = standard_planner(parse, query_string, cursorOptions, boundParams);
302
303 pgstat_report_plan_id(result->planId, false);
304
305 return result;
306}
307
309standard_planner(Query *parse, const char *query_string, int cursorOptions,
310 ParamListInfo boundParams)
311{
312 PlannedStmt *result;
313 PlannerGlobal *glob;
314 double tuple_fraction;
316 RelOptInfo *final_rel;
317 Path *best_path;
318 Plan *top_plan;
319 ListCell *lp,
320 *lr;
321
322 /*
323 * Set up global state for this planner invocation. This data is needed
324 * across all levels of sub-Query that might exist in the given command,
325 * so we keep it in a separate struct that's linked to by each per-Query
326 * PlannerInfo.
327 */
328 glob = makeNode(PlannerGlobal);
329
330 glob->boundParams = boundParams;
331 glob->subplans = NIL;
332 glob->subpaths = NIL;
333 glob->subroots = NIL;
334 glob->rewindPlanIDs = NULL;
335 glob->finalrtable = NIL;
336 glob->allRelids = NULL;
337 glob->prunableRelids = NULL;
338 glob->finalrteperminfos = NIL;
339 glob->finalrowmarks = NIL;
340 glob->resultRelations = NIL;
341 glob->appendRelations = NIL;
342 glob->partPruneInfos = NIL;
343 glob->relationOids = NIL;
344 glob->invalItems = NIL;
345 glob->paramExecTypes = NIL;
346 glob->lastPHId = 0;
347 glob->lastRowMarkId = 0;
348 glob->lastPlanNodeId = 0;
349 glob->transientPlan = false;
350 glob->dependsOnRole = false;
351 glob->partition_directory = NULL;
352 glob->rel_notnullatts_hash = NULL;
353
354 /*
355 * Assess whether it's feasible to use parallel mode for this query. We
356 * can't do this in a standalone backend, or if the command will try to
357 * modify any data, or if this is a cursor operation, or if GUCs are set
358 * to values that don't permit parallelism, or if parallel-unsafe
359 * functions are present in the query tree.
360 *
361 * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
362 * MATERIALIZED VIEW to use parallel plans, but this is safe only because
363 * the command is writing into a completely new table which workers won't
364 * be able to see. If the workers could see the table, the fact that
365 * group locking would cause them to ignore the leader's heavyweight GIN
366 * page locks would make this unsafe. We'll have to fix that somehow if
367 * we want to allow parallel inserts in general; updates and deletes have
368 * additional problems especially around combo CIDs.)
369 *
370 * For now, we don't try to use parallel mode if we're running inside a
371 * parallel worker. We might eventually be able to relax this
372 * restriction, but for now it seems best not to have parallel workers
373 * trying to create their own parallel workers.
374 */
375 if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
377 parse->commandType == CMD_SELECT &&
378 !parse->hasModifyingCTE &&
381 {
382 /* all the cheap tests pass, so scan the query tree */
384 glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
385 }
386 else
387 {
388 /* skip the query tree scan, just assume it's unsafe */
389 glob->maxParallelHazard = PROPARALLEL_UNSAFE;
390 glob->parallelModeOK = false;
391 }
392
393 /*
394 * glob->parallelModeNeeded is normally set to false here and changed to
395 * true during plan creation if a Gather or Gather Merge plan is actually
396 * created (cf. create_gather_plan, create_gather_merge_plan).
397 *
398 * However, if debug_parallel_query = on or debug_parallel_query =
399 * regress, then we impose parallel mode whenever it's safe to do so, even
400 * if the final plan doesn't use parallelism. It's not safe to do so if
401 * the query contains anything parallel-unsafe; parallelModeOK will be
402 * false in that case. Note that parallelModeOK can't change after this
403 * point. Otherwise, everything in the query is either parallel-safe or
404 * parallel-restricted, and in either case it should be OK to impose
405 * parallel-mode restrictions. If that ends up breaking something, then
406 * either some function the user included in the query is incorrectly
407 * labeled as parallel-safe or parallel-restricted when in reality it's
408 * parallel-unsafe, or else the query planner itself has a bug.
409 */
410 glob->parallelModeNeeded = glob->parallelModeOK &&
412
413 /* Determine what fraction of the plan is likely to be scanned */
414 if (cursorOptions & CURSOR_OPT_FAST_PLAN)
415 {
416 /*
417 * We have no real idea how many tuples the user will ultimately FETCH
418 * from a cursor, but it is often the case that he doesn't want 'em
419 * all, or would prefer a fast-start plan anyway so that he can
420 * process some of the tuples sooner. Use a GUC parameter to decide
421 * what fraction to optimize for.
422 */
423 tuple_fraction = cursor_tuple_fraction;
424
425 /*
426 * We document cursor_tuple_fraction as simply being a fraction, which
427 * means the edge cases 0 and 1 have to be treated specially here. We
428 * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
429 */
430 if (tuple_fraction >= 1.0)
431 tuple_fraction = 0.0;
432 else if (tuple_fraction <= 0.0)
433 tuple_fraction = 1e-10;
434 }
435 else
436 {
437 /* Default assumption is we need all the tuples */
438 tuple_fraction = 0.0;
439 }
440
441 /* primary planning entry point (may recurse for subqueries) */
442 root = subquery_planner(glob, parse, NULL, false, tuple_fraction, NULL);
443
444 /* Select best Path and turn it into a Plan */
445 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
446 best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
447
448 top_plan = create_plan(root, best_path);
449
450 /*
451 * If creating a plan for a scrollable cursor, make sure it can run
452 * backwards on demand. Add a Material node at the top at need.
453 */
454 if (cursorOptions & CURSOR_OPT_SCROLL)
455 {
456 if (!ExecSupportsBackwardScan(top_plan))
457 top_plan = materialize_finished_plan(top_plan);
458 }
459
460 /*
461 * Optionally add a Gather node for testing purposes, provided this is
462 * actually a safe thing to do.
463 *
464 * We can add Gather even when top_plan has parallel-safe initPlans, but
465 * then we have to move the initPlans to the Gather node because of
466 * SS_finalize_plan's limitations. That would cause cosmetic breakage of
467 * regression tests when debug_parallel_query = regress, because initPlans
468 * that would normally appear on the top_plan move to the Gather, causing
469 * them to disappear from EXPLAIN output. That doesn't seem worth kluging
470 * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
471 */
473 top_plan->parallel_safe &&
474 (top_plan->initPlan == NIL ||
476 {
477 Gather *gather = makeNode(Gather);
478 Cost initplan_cost;
479 bool unsafe_initplans;
480
481 gather->plan.targetlist = top_plan->targetlist;
482 gather->plan.qual = NIL;
483 gather->plan.lefttree = top_plan;
484 gather->plan.righttree = NULL;
485 gather->num_workers = 1;
486 gather->single_copy = true;
488
489 /* Transfer any initPlans to the new top node */
490 gather->plan.initPlan = top_plan->initPlan;
491 top_plan->initPlan = NIL;
492
493 /*
494 * Since this Gather has no parallel-aware descendants to signal to,
495 * we don't need a rescan Param.
496 */
497 gather->rescan_param = -1;
498
499 /*
500 * Ideally we'd use cost_gather here, but setting up dummy path data
501 * to satisfy it doesn't seem much cleaner than knowing what it does.
502 */
503 gather->plan.startup_cost = top_plan->startup_cost +
505 gather->plan.total_cost = top_plan->total_cost +
507 gather->plan.plan_rows = top_plan->plan_rows;
508 gather->plan.plan_width = top_plan->plan_width;
509 gather->plan.parallel_aware = false;
510 gather->plan.parallel_safe = false;
511
512 /*
513 * Delete the initplans' cost from top_plan. We needn't add it to the
514 * Gather node, since the above coding already included it there.
515 */
517 &initplan_cost, &unsafe_initplans);
518 top_plan->startup_cost -= initplan_cost;
519 top_plan->total_cost -= initplan_cost;
520
521 /* use parallel mode for parallel plans. */
522 root->glob->parallelModeNeeded = true;
523
524 top_plan = &gather->plan;
525 }
526
527 /*
528 * If any Params were generated, run through the plan tree and compute
529 * each plan node's extParam/allParam sets. Ideally we'd merge this into
530 * set_plan_references' tree traversal, but for now it has to be separate
531 * because we need to visit subplans before not after main plan.
532 */
533 if (glob->paramExecTypes != NIL)
534 {
535 Assert(list_length(glob->subplans) == list_length(glob->subroots));
536 forboth(lp, glob->subplans, lr, glob->subroots)
537 {
538 Plan *subplan = (Plan *) lfirst(lp);
539 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
540
541 SS_finalize_plan(subroot, subplan);
542 }
543 SS_finalize_plan(root, top_plan);
544 }
545
546 /* final cleanup of the plan */
547 Assert(glob->finalrtable == NIL);
548 Assert(glob->finalrteperminfos == NIL);
549 Assert(glob->finalrowmarks == NIL);
550 Assert(glob->resultRelations == NIL);
551 Assert(glob->appendRelations == NIL);
552 top_plan = set_plan_references(root, top_plan);
553 /* ... and the subplans (both regular subplans and initplans) */
554 Assert(list_length(glob->subplans) == list_length(glob->subroots));
555 forboth(lp, glob->subplans, lr, glob->subroots)
556 {
557 Plan *subplan = (Plan *) lfirst(lp);
558 PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
559
560 lfirst(lp) = set_plan_references(subroot, subplan);
561 }
562
563 /* build the PlannedStmt result */
564 result = makeNode(PlannedStmt);
565
566 result->commandType = parse->commandType;
567 result->queryId = parse->queryId;
569 result->hasReturning = (parse->returningList != NIL);
570 result->hasModifyingCTE = parse->hasModifyingCTE;
571 result->canSetTag = parse->canSetTag;
572 result->transientPlan = glob->transientPlan;
573 result->dependsOnRole = glob->dependsOnRole;
575 result->planTree = top_plan;
576 result->partPruneInfos = glob->partPruneInfos;
577 result->rtable = glob->finalrtable;
579 glob->prunableRelids);
580 result->permInfos = glob->finalrteperminfos;
581 result->resultRelations = glob->resultRelations;
582 result->appendRelations = glob->appendRelations;
583 result->subplans = glob->subplans;
584 result->rewindPlanIDs = glob->rewindPlanIDs;
585 result->rowMarks = glob->finalrowmarks;
586 result->relationOids = glob->relationOids;
587 result->invalItems = glob->invalItems;
588 result->paramExecTypes = glob->paramExecTypes;
589 /* utilityStmt should be null, but we might as well copy it */
590 result->utilityStmt = parse->utilityStmt;
591 result->stmt_location = parse->stmt_location;
592 result->stmt_len = parse->stmt_len;
593
594 result->jitFlags = PGJIT_NONE;
595 if (jit_enabled && jit_above_cost >= 0 &&
596 top_plan->total_cost > jit_above_cost)
597 {
598 result->jitFlags |= PGJIT_PERFORM;
599
600 /*
601 * Decide how much effort should be put into generating better code.
602 */
603 if (jit_optimize_above_cost >= 0 &&
605 result->jitFlags |= PGJIT_OPT3;
606 if (jit_inline_above_cost >= 0 &&
608 result->jitFlags |= PGJIT_INLINE;
609
610 /*
611 * Decide which operations should be JITed.
612 */
613 if (jit_expressions)
614 result->jitFlags |= PGJIT_EXPR;
616 result->jitFlags |= PGJIT_DEFORM;
617 }
618
619 if (glob->partition_directory != NULL)
620 DestroyPartitionDirectory(glob->partition_directory);
621
622 return result;
623}
624
625
626/*--------------------
627 * subquery_planner
628 * Invokes the planner on a subquery. We recurse to here for each
629 * sub-SELECT found in the query tree.
630 *
631 * glob is the global state for the current planner run.
632 * parse is the querytree produced by the parser & rewriter.
633 * parent_root is the immediate parent Query's info (NULL at the top level).
634 * hasRecursion is true if this is a recursive WITH query.
635 * tuple_fraction is the fraction of tuples we expect will be retrieved.
636 * tuple_fraction is interpreted as explained for grouping_planner, below.
637 * setops is used for set operation subqueries to provide the subquery with
638 * the context in which it's being used so that Paths correctly sorted for the
639 * set operation can be generated. NULL when not planning a set operation
640 * child, or when a child of a set op that isn't interested in sorted input.
641 *
642 * Basically, this routine does the stuff that should only be done once
643 * per Query object. It then calls grouping_planner. At one time,
644 * grouping_planner could be invoked recursively on the same Query object;
645 * that's not currently true, but we keep the separation between the two
646 * routines anyway, in case we need it again someday.
647 *
648 * subquery_planner will be called recursively to handle sub-Query nodes
649 * found within the query's expressions and rangetable.
650 *
651 * Returns the PlannerInfo struct ("root") that contains all data generated
652 * while planning the subquery. In particular, the Path(s) attached to
653 * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
654 * cheapest way(s) to implement the query. The top level will select the
655 * best Path and pass it through createplan.c to produce a finished Plan.
656 *--------------------
657 */
660 bool hasRecursion, double tuple_fraction,
661 SetOperationStmt *setops)
662{
664 List *newWithCheckOptions;
665 List *newHaving;
666 bool hasOuterJoins;
667 bool hasResultRTEs;
668 RelOptInfo *final_rel;
669 ListCell *l;
670
671 /* Create a PlannerInfo data structure for this subquery */
673 root->parse = parse;
674 root->glob = glob;
675 root->query_level = parent_root ? parent_root->query_level + 1 : 1;
676 root->parent_root = parent_root;
677 root->plan_params = NIL;
678 root->outer_params = NULL;
679 root->planner_cxt = CurrentMemoryContext;
680 root->init_plans = NIL;
681 root->cte_plan_ids = NIL;
682 root->multiexpr_params = NIL;
683 root->join_domains = NIL;
684 root->eq_classes = NIL;
685 root->ec_merging_done = false;
686 root->last_rinfo_serial = 0;
687 root->all_result_relids =
688 parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
689 root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
690 root->append_rel_list = NIL;
691 root->row_identity_vars = NIL;
692 root->rowMarks = NIL;
693 memset(root->upper_rels, 0, sizeof(root->upper_rels));
694 memset(root->upper_targets, 0, sizeof(root->upper_targets));
695 root->processed_groupClause = NIL;
696 root->processed_distinctClause = NIL;
697 root->processed_tlist = NIL;
698 root->update_colnos = NIL;
699 root->grouping_map = NULL;
700 root->minmax_aggs = NIL;
701 root->qual_security_level = 0;
702 root->hasPseudoConstantQuals = false;
703 root->hasAlternativeSubPlans = false;
704 root->placeholdersFrozen = false;
705 root->hasRecursion = hasRecursion;
706 if (hasRecursion)
707 root->wt_param_id = assign_special_exec_param(root);
708 else
709 root->wt_param_id = -1;
710 root->non_recursive_path = NULL;
711 root->partColsUpdated = false;
712
713 /*
714 * Create the top-level join domain. This won't have valid contents until
715 * deconstruct_jointree fills it in, but the node needs to exist before
716 * that so we can build EquivalenceClasses referencing it.
717 */
718 root->join_domains = list_make1(makeNode(JoinDomain));
719
720 /*
721 * If there is a WITH list, process each WITH query and either convert it
722 * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
723 */
724 if (parse->cteList)
726
727 /*
728 * If it's a MERGE command, transform the joinlist as appropriate.
729 */
731
732 /*
733 * Scan the rangetable for relation RTEs and retrieve the necessary
734 * catalog information for each relation. Using this information, clear
735 * the inh flag for any relation that has no children, collect not-null
736 * attribute numbers for any relation that has column not-null
737 * constraints, and expand virtual generated columns for any relation that
738 * contains them. Note that this step does not descend into sublinks and
739 * subqueries; if we pull up any sublinks or subqueries below, their
740 * relation RTEs are processed just before pulling them up.
741 */
743
744 /*
745 * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
746 * that we don't need so many special cases to deal with that situation.
747 */
749
750 /*
751 * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
752 * to transform them into joins. Note that this step does not descend
753 * into subqueries; if we pull up any subqueries below, their SubLinks are
754 * processed just before pulling them up.
755 */
756 if (parse->hasSubLinks)
758
759 /*
760 * Scan the rangetable for function RTEs, do const-simplification on them,
761 * and then inline them if possible (producing subqueries that might get
762 * pulled up next). Recursion issues here are handled in the same way as
763 * for SubLinks.
764 */
766
767 /*
768 * Check to see if any subqueries in the jointree can be merged into this
769 * query.
770 */
772
773 /*
774 * If this is a simple UNION ALL query, flatten it into an appendrel. We
775 * do this now because it requires applying pull_up_subqueries to the leaf
776 * queries of the UNION ALL, which weren't touched above because they
777 * weren't referenced by the jointree (they will be after we do this).
778 */
779 if (parse->setOperations)
781
782 /*
783 * Survey the rangetable to see what kinds of entries are present. We can
784 * skip some later processing if relevant SQL features are not used; for
785 * example if there are no JOIN RTEs we can avoid the expense of doing
786 * flatten_join_alias_vars(). This must be done after we have finished
787 * adding rangetable entries, of course. (Note: actually, processing of
788 * inherited or partitioned rels can cause RTEs for their child tables to
789 * get added later; but those must all be RTE_RELATION entries, so they
790 * don't invalidate the conclusions drawn here.)
791 */
792 root->hasJoinRTEs = false;
793 root->hasLateralRTEs = false;
794 root->group_rtindex = 0;
795 hasOuterJoins = false;
796 hasResultRTEs = false;
797 foreach(l, parse->rtable)
798 {
800
801 switch (rte->rtekind)
802 {
803 case RTE_JOIN:
804 root->hasJoinRTEs = true;
805 if (IS_OUTER_JOIN(rte->jointype))
806 hasOuterJoins = true;
807 break;
808 case RTE_RESULT:
809 hasResultRTEs = true;
810 break;
811 case RTE_GROUP:
812 Assert(parse->hasGroupRTE);
813 root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
814 break;
815 default:
816 /* No work here for other RTE types */
817 break;
818 }
819
820 if (rte->lateral)
821 root->hasLateralRTEs = true;
822
823 /*
824 * We can also determine the maximum security level required for any
825 * securityQuals now. Addition of inheritance-child RTEs won't affect
826 * this, because child tables don't have their own securityQuals; see
827 * expand_single_inheritance_child().
828 */
829 if (rte->securityQuals)
830 root->qual_security_level = Max(root->qual_security_level,
831 list_length(rte->securityQuals));
832 }
833
834 /*
835 * If we have now verified that the query target relation is
836 * non-inheriting, mark it as a leaf target.
837 */
838 if (parse->resultRelation)
839 {
840 RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
841
842 if (!rte->inh)
843 root->leaf_result_relids =
844 bms_make_singleton(parse->resultRelation);
845 }
846
847 /*
848 * This would be a convenient time to check access permissions for all
849 * relations mentioned in the query, since it would be better to fail now,
850 * before doing any detailed planning. However, for historical reasons,
851 * we leave this to be done at executor startup.
852 *
853 * Note, however, that we do need to check access permissions for any view
854 * relations mentioned in the query, in order to prevent information being
855 * leaked by selectivity estimation functions, which only check view owner
856 * permissions on underlying tables (see all_rows_selectable() and its
857 * callers). This is a little ugly, because it means that access
858 * permissions for views will be checked twice, which is another reason
859 * why it would be better to do all the ACL checks here.
860 */
861 foreach(l, parse->rtable)
862 {
864
865 if (rte->perminfoindex != 0 &&
866 rte->relkind == RELKIND_VIEW)
867 {
868 RTEPermissionInfo *perminfo;
869 bool result;
870
871 perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
872 result = ExecCheckOneRelPerms(perminfo);
873 if (!result)
875 get_rel_name(perminfo->relid));
876 }
877 }
878
879 /*
880 * Preprocess RowMark information. We need to do this after subquery
881 * pullup, so that all base relations are present.
882 */
884
885 /*
886 * Set hasHavingQual to remember if HAVING clause is present. Needed
887 * because preprocess_expression will reduce a constant-true condition to
888 * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
889 */
890 root->hasHavingQual = (parse->havingQual != NULL);
891
892 /*
893 * Do expression preprocessing on targetlist and quals, as well as other
894 * random expressions in the querytree. Note that we do not need to
895 * handle sort/group expressions explicitly, because they are actually
896 * part of the targetlist.
897 */
898 parse->targetList = (List *)
899 preprocess_expression(root, (Node *) parse->targetList,
901
902 newWithCheckOptions = NIL;
903 foreach(l, parse->withCheckOptions)
904 {
906
907 wco->qual = preprocess_expression(root, wco->qual,
909 if (wco->qual != NULL)
910 newWithCheckOptions = lappend(newWithCheckOptions, wco);
911 }
912 parse->withCheckOptions = newWithCheckOptions;
913
914 parse->returningList = (List *)
915 preprocess_expression(root, (Node *) parse->returningList,
917
919
920 parse->havingQual = preprocess_expression(root, parse->havingQual,
922
923 foreach(l, parse->windowClause)
924 {
926
927 /* partitionClause/orderClause are sort/group expressions */
932 }
933
934 parse->limitOffset = preprocess_expression(root, parse->limitOffset,
936 parse->limitCount = preprocess_expression(root, parse->limitCount,
938
939 if (parse->onConflict)
940 {
941 parse->onConflict->arbiterElems = (List *)
943 (Node *) parse->onConflict->arbiterElems,
945 parse->onConflict->arbiterWhere =
947 parse->onConflict->arbiterWhere,
949 parse->onConflict->onConflictSet = (List *)
951 (Node *) parse->onConflict->onConflictSet,
953 parse->onConflict->onConflictWhere =
955 parse->onConflict->onConflictWhere,
957 /* exclRelTlist contains only Vars, so no preprocessing needed */
958 }
959
960 foreach(l, parse->mergeActionList)
961 {
963
964 action->targetList = (List *)
966 (Node *) action->targetList,
968 action->qual =
970 (Node *) action->qual,
972 }
973
974 parse->mergeJoinCondition =
975 preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
976
977 root->append_rel_list = (List *)
978 preprocess_expression(root, (Node *) root->append_rel_list,
980
981 /* Also need to preprocess expressions within RTEs */
982 foreach(l, parse->rtable)
983 {
985 int kind;
986 ListCell *lcsq;
987
988 if (rte->rtekind == RTE_RELATION)
989 {
990 if (rte->tablesample)
993 (Node *) rte->tablesample,
995 }
996 else if (rte->rtekind == RTE_SUBQUERY)
997 {
998 /*
999 * We don't want to do all preprocessing yet on the subquery's
1000 * expressions, since that will happen when we plan it. But if it
1001 * contains any join aliases of our level, those have to get
1002 * expanded now, because planning of the subquery won't do it.
1003 * That's only possible if the subquery is LATERAL.
1004 */
1005 if (rte->lateral && root->hasJoinRTEs)
1006 rte->subquery = (Query *)
1008 (Node *) rte->subquery);
1009 }
1010 else if (rte->rtekind == RTE_FUNCTION)
1011 {
1012 /* Preprocess the function expression(s) fully */
1013 kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
1014 rte->functions = (List *)
1015 preprocess_expression(root, (Node *) rte->functions, kind);
1016 }
1017 else if (rte->rtekind == RTE_TABLEFUNC)
1018 {
1019 /* Preprocess the function expression(s) fully */
1020 kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
1021 rte->tablefunc = (TableFunc *)
1022 preprocess_expression(root, (Node *) rte->tablefunc, kind);
1023 }
1024 else if (rte->rtekind == RTE_VALUES)
1025 {
1026 /* Preprocess the values lists fully */
1027 kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1028 rte->values_lists = (List *)
1030 }
1031 else if (rte->rtekind == RTE_GROUP)
1032 {
1033 /* Preprocess the groupexprs list fully */
1034 rte->groupexprs = (List *)
1035 preprocess_expression(root, (Node *) rte->groupexprs,
1037 }
1038
1039 /*
1040 * Process each element of the securityQuals list as if it were a
1041 * separate qual expression (as indeed it is). We need to do it this
1042 * way to get proper canonicalization of AND/OR structure. Note that
1043 * this converts each element into an implicit-AND sublist.
1044 */
1045 foreach(lcsq, rte->securityQuals)
1046 {
1048 (Node *) lfirst(lcsq),
1050 }
1051 }
1052
1053 /*
1054 * Now that we are done preprocessing expressions, and in particular done
1055 * flattening join alias variables, get rid of the joinaliasvars lists.
1056 * They no longer match what expressions in the rest of the tree look
1057 * like, because we have not preprocessed expressions in those lists (and
1058 * do not want to; for example, expanding a SubLink there would result in
1059 * a useless unreferenced subplan). Leaving them in place simply creates
1060 * a hazard for later scans of the tree. We could try to prevent that by
1061 * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1062 * but that doesn't sound very reliable.
1063 */
1064 if (root->hasJoinRTEs)
1065 {
1066 foreach(l, parse->rtable)
1067 {
1069
1070 rte->joinaliasvars = NIL;
1071 }
1072 }
1073
1074 /*
1075 * Replace any Vars in the subquery's targetlist and havingQual that
1076 * reference GROUP outputs with the underlying grouping expressions.
1077 *
1078 * Note that we need to perform this replacement after we've preprocessed
1079 * the grouping expressions. This is to ensure that there is only one
1080 * instance of SubPlan for each SubLink contained within the grouping
1081 * expressions.
1082 */
1083 if (parse->hasGroupRTE)
1084 {
1085 parse->targetList = (List *)
1086 flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1087 parse->havingQual =
1088 flatten_group_exprs(root, root->parse, parse->havingQual);
1089 }
1090
1091 /* Constant-folding might have removed all set-returning functions */
1092 if (parse->hasTargetSRFs)
1093 parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1094
1095 /*
1096 * In some cases we may want to transfer a HAVING clause into WHERE. We
1097 * cannot do so if the HAVING clause contains aggregates (obviously) or
1098 * volatile functions (since a HAVING clause is supposed to be executed
1099 * only once per group). We also can't do this if there are any nonempty
1100 * grouping sets and the clause references any columns that are nullable
1101 * by the grouping sets; moving such a clause into WHERE would potentially
1102 * change the results. (If there are only empty grouping sets, then the
1103 * HAVING clause must be degenerate as discussed below.)
1104 *
1105 * Also, it may be that the clause is so expensive to execute that we're
1106 * better off doing it only once per group, despite the loss of
1107 * selectivity. This is hard to estimate short of doing the entire
1108 * planning process twice, so we use a heuristic: clauses containing
1109 * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1110 * clause into WHERE, in hopes of eliminating tuples before aggregation
1111 * instead of after.
1112 *
1113 * If the query has explicit grouping then we can simply move such a
1114 * clause into WHERE; any group that fails the clause will not be in the
1115 * output because none of its tuples will reach the grouping or
1116 * aggregation stage. Otherwise we must have a degenerate (variable-free)
1117 * HAVING clause, which we put in WHERE so that query_planner() can use it
1118 * in a gating Result node, but also keep in HAVING to ensure that we
1119 * don't emit a bogus aggregated row. (This could be done better, but it
1120 * seems not worth optimizing.)
1121 *
1122 * Note that a HAVING clause may contain expressions that are not fully
1123 * preprocessed. This can happen if these expressions are part of
1124 * grouping items. In such cases, they are replaced with GROUP Vars in
1125 * the parser and then replaced back after we've done with expression
1126 * preprocessing on havingQual. This is not an issue if the clause
1127 * remains in HAVING, because these expressions will be matched to lower
1128 * target items in setrefs.c. However, if the clause is moved or copied
1129 * into WHERE, we need to ensure that these expressions are fully
1130 * preprocessed.
1131 *
1132 * Note that both havingQual and parse->jointree->quals are in
1133 * implicitly-ANDed-list form at this point, even though they are declared
1134 * as Node *.
1135 */
1136 newHaving = NIL;
1137 foreach(l, (List *) parse->havingQual)
1138 {
1139 Node *havingclause = (Node *) lfirst(l);
1140
1141 if (contain_agg_clause(havingclause) ||
1142 contain_volatile_functions(havingclause) ||
1143 contain_subplans(havingclause) ||
1144 (parse->groupClause && parse->groupingSets &&
1145 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1146 {
1147 /* keep it in HAVING */
1148 newHaving = lappend(newHaving, havingclause);
1149 }
1150 else if (parse->groupClause)
1151 {
1152 Node *whereclause;
1153
1154 /* Preprocess the HAVING clause fully */
1155 whereclause = preprocess_expression(root, havingclause,
1157 /* ... and move it to WHERE */
1158 parse->jointree->quals = (Node *)
1159 list_concat((List *) parse->jointree->quals,
1160 (List *) whereclause);
1161 }
1162 else
1163 {
1164 Node *whereclause;
1165
1166 /* Preprocess the HAVING clause fully */
1167 whereclause = preprocess_expression(root, copyObject(havingclause),
1169 /* ... and put a copy in WHERE */
1170 parse->jointree->quals = (Node *)
1171 list_concat((List *) parse->jointree->quals,
1172 (List *) whereclause);
1173 /* ... and also keep it in HAVING */
1174 newHaving = lappend(newHaving, havingclause);
1175 }
1176 }
1177 parse->havingQual = (Node *) newHaving;
1178
1179 /*
1180 * If we have any outer joins, try to reduce them to plain inner joins.
1181 * This step is most easily done after we've done expression
1182 * preprocessing.
1183 */
1184 if (hasOuterJoins)
1186
1187 /*
1188 * If we have any RTE_RESULT relations, see if they can be deleted from
1189 * the jointree. We also rely on this processing to flatten single-child
1190 * FromExprs underneath outer joins. This step is most effectively done
1191 * after we've done expression preprocessing and outer join reduction.
1192 */
1193 if (hasResultRTEs || hasOuterJoins)
1195
1196 /*
1197 * Do the main planning.
1198 */
1199 grouping_planner(root, tuple_fraction, setops);
1200
1201 /*
1202 * Capture the set of outer-level param IDs we have access to, for use in
1203 * extParam/allParam calculations later.
1204 */
1206
1207 /*
1208 * If any initPlans were created in this query level, adjust the surviving
1209 * Paths' costs and parallel-safety flags to account for them. The
1210 * initPlans won't actually get attached to the plan tree till
1211 * create_plan() runs, but we must include their effects now.
1212 */
1213 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1214 SS_charge_for_initplans(root, final_rel);
1215
1216 /*
1217 * Make sure we've identified the cheapest Path for the final rel. (By
1218 * doing this here not in grouping_planner, we include initPlan costs in
1219 * the decision, though it's unlikely that will change anything.)
1220 */
1221 set_cheapest(final_rel);
1222
1223 return root;
1224}
1225
1226/*
1227 * preprocess_expression
1228 * Do subquery_planner's preprocessing work for an expression,
1229 * which can be a targetlist, a WHERE clause (including JOIN/ON
1230 * conditions), a HAVING clause, or a few other things.
1231 */
1232static Node *
1234{
1235 /*
1236 * Fall out quickly if expression is empty. This occurs often enough to
1237 * be worth checking. Note that null->null is the correct conversion for
1238 * implicit-AND result format, too.
1239 */
1240 if (expr == NULL)
1241 return NULL;
1242
1243 /*
1244 * If the query has any join RTEs, replace join alias variables with
1245 * base-relation variables. We must do this first, since any expressions
1246 * we may extract from the joinaliasvars lists have not been preprocessed.
1247 * For example, if we did this after sublink processing, sublinks expanded
1248 * out from join aliases would not get processed. But we can skip this in
1249 * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1250 * they can't contain any Vars of the current query level.
1251 */
1252 if (root->hasJoinRTEs &&
1253 !(kind == EXPRKIND_RTFUNC ||
1254 kind == EXPRKIND_VALUES ||
1255 kind == EXPRKIND_TABLESAMPLE ||
1256 kind == EXPRKIND_TABLEFUNC))
1257 expr = flatten_join_alias_vars(root, root->parse, expr);
1258
1259 /*
1260 * Simplify constant expressions. For function RTEs, this was already
1261 * done by preprocess_function_rtes. (But note we must do it again for
1262 * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1263 * un-simplified subexpressions inserted by flattening of subqueries or
1264 * join alias variables.)
1265 *
1266 * Note: an essential effect of this is to convert named-argument function
1267 * calls to positional notation and insert the current actual values of
1268 * any default arguments for functions. To ensure that happens, we *must*
1269 * process all expressions here. Previous PG versions sometimes skipped
1270 * const-simplification if it didn't seem worth the trouble, but we can't
1271 * do that anymore.
1272 *
1273 * Note: this also flattens nested AND and OR expressions into N-argument
1274 * form. All processing of a qual expression after this point must be
1275 * careful to maintain AND/OR flatness --- that is, do not generate a tree
1276 * with AND directly under AND, nor OR directly under OR.
1277 */
1278 if (kind != EXPRKIND_RTFUNC)
1279 expr = eval_const_expressions(root, expr);
1280
1281 /*
1282 * If it's a qual or havingQual, canonicalize it.
1283 */
1284 if (kind == EXPRKIND_QUAL)
1285 {
1286 expr = (Node *) canonicalize_qual((Expr *) expr, false);
1287
1288#ifdef OPTIMIZER_DEBUG
1289 printf("After canonicalize_qual()\n");
1290 pprint(expr);
1291#endif
1292 }
1293
1294 /*
1295 * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1296 * hashfuncid of any that might execute more quickly by using hash lookups
1297 * instead of a linear search.
1298 */
1299 if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1300 {
1302 }
1303
1304 /* Expand SubLinks to SubPlans */
1305 if (root->parse->hasSubLinks)
1306 expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1307
1308 /*
1309 * XXX do not insert anything here unless you have grokked the comments in
1310 * SS_replace_correlation_vars ...
1311 */
1312
1313 /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1314 if (root->query_level > 1)
1315 expr = SS_replace_correlation_vars(root, expr);
1316
1317 /*
1318 * If it's a qual or havingQual, convert it to implicit-AND format. (We
1319 * don't want to do this before eval_const_expressions, since the latter
1320 * would be unable to simplify a top-level AND correctly. Also,
1321 * SS_process_sublinks expects explicit-AND format.)
1322 */
1323 if (kind == EXPRKIND_QUAL)
1324 expr = (Node *) make_ands_implicit((Expr *) expr);
1325
1326 return expr;
1327}
1328
1329/*
1330 * preprocess_qual_conditions
1331 * Recursively scan the query's jointree and do subquery_planner's
1332 * preprocessing work on each qual condition found therein.
1333 */
1334static void
1336{
1337 if (jtnode == NULL)
1338 return;
1339 if (IsA(jtnode, RangeTblRef))
1340 {
1341 /* nothing to do here */
1342 }
1343 else if (IsA(jtnode, FromExpr))
1344 {
1345 FromExpr *f = (FromExpr *) jtnode;
1346 ListCell *l;
1347
1348 foreach(l, f->fromlist)
1350
1352 }
1353 else if (IsA(jtnode, JoinExpr))
1354 {
1355 JoinExpr *j = (JoinExpr *) jtnode;
1356
1359
1360 j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1361 }
1362 else
1363 elog(ERROR, "unrecognized node type: %d",
1364 (int) nodeTag(jtnode));
1365}
1366
1367/*
1368 * preprocess_phv_expression
1369 * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1370 *
1371 * If a LATERAL subquery references an output of another subquery, and that
1372 * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1373 * join, then we'll push the PlaceHolderVar expression down into the subquery
1374 * and later pull it back up during find_lateral_references, which runs after
1375 * subquery_planner has preprocessed all the expressions that were in the
1376 * current query level to start with. So we need to preprocess it then.
1377 */
1378Expr *
1380{
1381 return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1382}
1383
1384/*--------------------
1385 * grouping_planner
1386 * Perform planning steps related to grouping, aggregation, etc.
1387 *
1388 * This function adds all required top-level processing to the scan/join
1389 * Path(s) produced by query_planner.
1390 *
1391 * tuple_fraction is the fraction of tuples we expect will be retrieved.
1392 * tuple_fraction is interpreted as follows:
1393 * 0: expect all tuples to be retrieved (normal case)
1394 * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1395 * from the plan to be retrieved
1396 * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1397 * expected to be retrieved (ie, a LIMIT specification).
1398 * setops is used for set operation subqueries to provide the subquery with
1399 * the context in which it's being used so that Paths correctly sorted for the
1400 * set operation can be generated. NULL when not planning a set operation
1401 * child, or when a child of a set op that isn't interested in sorted input.
1402 *
1403 * Returns nothing; the useful output is in the Paths we attach to the
1404 * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1405 * root->processed_tlist contains the final processed targetlist.
1406 *
1407 * Note that we have not done set_cheapest() on the final rel; it's convenient
1408 * to leave this to the caller.
1409 *--------------------
1410 */
1411static void
1412grouping_planner(PlannerInfo *root, double tuple_fraction,
1413 SetOperationStmt *setops)
1414{
1415 Query *parse = root->parse;
1416 int64 offset_est = 0;
1417 int64 count_est = 0;
1418 double limit_tuples = -1.0;
1419 bool have_postponed_srfs = false;
1420 PathTarget *final_target;
1421 List *final_targets;
1422 List *final_targets_contain_srfs;
1423 bool final_target_parallel_safe;
1424 RelOptInfo *current_rel;
1425 RelOptInfo *final_rel;
1426 FinalPathExtraData extra;
1427 ListCell *lc;
1428
1429 /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1430 if (parse->limitCount || parse->limitOffset)
1431 {
1432 tuple_fraction = preprocess_limit(root, tuple_fraction,
1433 &offset_est, &count_est);
1434
1435 /*
1436 * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1437 * estimate the effects of using a bounded sort.
1438 */
1439 if (count_est > 0 && offset_est >= 0)
1440 limit_tuples = (double) count_est + (double) offset_est;
1441 }
1442
1443 /* Make tuple_fraction accessible to lower-level routines */
1444 root->tuple_fraction = tuple_fraction;
1445
1446 if (parse->setOperations)
1447 {
1448 /*
1449 * Construct Paths for set operations. The results will not need any
1450 * work except perhaps a top-level sort and/or LIMIT. Note that any
1451 * special work for recursive unions is the responsibility of
1452 * plan_set_operations.
1453 */
1454 current_rel = plan_set_operations(root);
1455
1456 /*
1457 * We should not need to call preprocess_targetlist, since we must be
1458 * in a SELECT query node. Instead, use the processed_tlist returned
1459 * by plan_set_operations (since this tells whether it returned any
1460 * resjunk columns!), and transfer any sort key information from the
1461 * original tlist.
1462 */
1463 Assert(parse->commandType == CMD_SELECT);
1464
1465 /* for safety, copy processed_tlist instead of modifying in-place */
1466 root->processed_tlist =
1467 postprocess_setop_tlist(copyObject(root->processed_tlist),
1468 parse->targetList);
1469
1470 /* Also extract the PathTarget form of the setop result tlist */
1471 final_target = current_rel->cheapest_total_path->pathtarget;
1472
1473 /* And check whether it's parallel safe */
1474 final_target_parallel_safe =
1475 is_parallel_safe(root, (Node *) final_target->exprs);
1476
1477 /* The setop result tlist couldn't contain any SRFs */
1478 Assert(!parse->hasTargetSRFs);
1479 final_targets = final_targets_contain_srfs = NIL;
1480
1481 /*
1482 * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1483 * checked already, but let's make sure).
1484 */
1485 if (parse->rowMarks)
1486 ereport(ERROR,
1487 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1488 /*------
1489 translator: %s is a SQL row locking clause such as FOR UPDATE */
1490 errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1492 parse->rowMarks)->strength))));
1493
1494 /*
1495 * Calculate pathkeys that represent result ordering requirements
1496 */
1497 Assert(parse->distinctClause == NIL);
1498 root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1499 parse->sortClause,
1500 root->processed_tlist);
1501 }
1502 else
1503 {
1504 /* No set operations, do regular planning */
1505 PathTarget *sort_input_target;
1506 List *sort_input_targets;
1507 List *sort_input_targets_contain_srfs;
1508 bool sort_input_target_parallel_safe;
1509 PathTarget *grouping_target;
1510 List *grouping_targets;
1511 List *grouping_targets_contain_srfs;
1512 bool grouping_target_parallel_safe;
1513 PathTarget *scanjoin_target;
1514 List *scanjoin_targets;
1515 List *scanjoin_targets_contain_srfs;
1516 bool scanjoin_target_parallel_safe;
1517 bool scanjoin_target_same_exprs;
1518 bool have_grouping;
1519 WindowFuncLists *wflists = NULL;
1520 List *activeWindows = NIL;
1521 grouping_sets_data *gset_data = NULL;
1522 standard_qp_extra qp_extra;
1523
1524 /* A recursive query should always have setOperations */
1525 Assert(!root->hasRecursion);
1526
1527 /* Preprocess grouping sets and GROUP BY clause, if any */
1528 if (parse->groupingSets)
1529 {
1530 gset_data = preprocess_grouping_sets(root);
1531 }
1532 else if (parse->groupClause)
1533 {
1534 /* Preprocess regular GROUP BY clause, if any */
1535 root->processed_groupClause = preprocess_groupclause(root, NIL);
1536 }
1537
1538 /*
1539 * Preprocess targetlist. Note that much of the remaining planning
1540 * work will be done with the PathTarget representation of tlists, but
1541 * we must also maintain the full representation of the final tlist so
1542 * that we can transfer its decoration (resnames etc) to the topmost
1543 * tlist of the finished Plan. This is kept in processed_tlist.
1544 */
1546
1547 /*
1548 * Mark all the aggregates with resolved aggtranstypes, and detect
1549 * aggregates that are duplicates or can share transition state. We
1550 * must do this before slicing and dicing the tlist into various
1551 * pathtargets, else some copies of the Aggref nodes might escape
1552 * being marked.
1553 */
1554 if (parse->hasAggs)
1555 {
1556 preprocess_aggrefs(root, (Node *) root->processed_tlist);
1557 preprocess_aggrefs(root, (Node *) parse->havingQual);
1558 }
1559
1560 /*
1561 * Locate any window functions in the tlist. (We don't need to look
1562 * anywhere else, since expressions used in ORDER BY will be in there
1563 * too.) Note that they could all have been eliminated by constant
1564 * folding, in which case we don't need to do any more work.
1565 */
1566 if (parse->hasWindowFuncs)
1567 {
1568 wflists = find_window_functions((Node *) root->processed_tlist,
1569 list_length(parse->windowClause));
1570 if (wflists->numWindowFuncs > 0)
1571 {
1572 /*
1573 * See if any modifications can be made to each WindowClause
1574 * to allow the executor to execute the WindowFuncs more
1575 * quickly.
1576 */
1577 optimize_window_clauses(root, wflists);
1578
1579 /* Extract the list of windows actually in use. */
1580 activeWindows = select_active_windows(root, wflists);
1581
1582 /* Make sure they all have names, for EXPLAIN's use. */
1583 name_active_windows(activeWindows);
1584 }
1585 else
1586 parse->hasWindowFuncs = false;
1587 }
1588
1589 /*
1590 * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1591 * adding logic between here and the query_planner() call. Anything
1592 * that is needed in MIN/MAX-optimizable cases will have to be
1593 * duplicated in planagg.c.
1594 */
1595 if (parse->hasAggs)
1597
1598 /*
1599 * Figure out whether there's a hard limit on the number of rows that
1600 * query_planner's result subplan needs to return. Even if we know a
1601 * hard limit overall, it doesn't apply if the query has any
1602 * grouping/aggregation operations, or SRFs in the tlist.
1603 */
1604 if (parse->groupClause ||
1605 parse->groupingSets ||
1606 parse->distinctClause ||
1607 parse->hasAggs ||
1608 parse->hasWindowFuncs ||
1609 parse->hasTargetSRFs ||
1610 root->hasHavingQual)
1611 root->limit_tuples = -1.0;
1612 else
1613 root->limit_tuples = limit_tuples;
1614
1615 /* Set up data needed by standard_qp_callback */
1616 qp_extra.activeWindows = activeWindows;
1617 qp_extra.gset_data = gset_data;
1618
1619 /*
1620 * If we're a subquery for a set operation, store the SetOperationStmt
1621 * in qp_extra.
1622 */
1623 qp_extra.setop = setops;
1624
1625 /*
1626 * Generate the best unsorted and presorted paths for the scan/join
1627 * portion of this Query, ie the processing represented by the
1628 * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1629 * We also generate (in standard_qp_callback) pathkey representations
1630 * of the query's sort clause, distinct clause, etc.
1631 */
1632 current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1633
1634 /*
1635 * Convert the query's result tlist into PathTarget format.
1636 *
1637 * Note: this cannot be done before query_planner() has performed
1638 * appendrel expansion, because that might add resjunk entries to
1639 * root->processed_tlist. Waiting till afterwards is also helpful
1640 * because the target width estimates can use per-Var width numbers
1641 * that were obtained within query_planner().
1642 */
1643 final_target = create_pathtarget(root, root->processed_tlist);
1644 final_target_parallel_safe =
1645 is_parallel_safe(root, (Node *) final_target->exprs);
1646
1647 /*
1648 * If ORDER BY was given, consider whether we should use a post-sort
1649 * projection, and compute the adjusted target for preceding steps if
1650 * so.
1651 */
1652 if (parse->sortClause)
1653 {
1654 sort_input_target = make_sort_input_target(root,
1655 final_target,
1656 &have_postponed_srfs);
1657 sort_input_target_parallel_safe =
1658 is_parallel_safe(root, (Node *) sort_input_target->exprs);
1659 }
1660 else
1661 {
1662 sort_input_target = final_target;
1663 sort_input_target_parallel_safe = final_target_parallel_safe;
1664 }
1665
1666 /*
1667 * If we have window functions to deal with, the output from any
1668 * grouping step needs to be what the window functions want;
1669 * otherwise, it should be sort_input_target.
1670 */
1671 if (activeWindows)
1672 {
1673 grouping_target = make_window_input_target(root,
1674 final_target,
1675 activeWindows);
1676 grouping_target_parallel_safe =
1677 is_parallel_safe(root, (Node *) grouping_target->exprs);
1678 }
1679 else
1680 {
1681 grouping_target = sort_input_target;
1682 grouping_target_parallel_safe = sort_input_target_parallel_safe;
1683 }
1684
1685 /*
1686 * If we have grouping or aggregation to do, the topmost scan/join
1687 * plan node must emit what the grouping step wants; otherwise, it
1688 * should emit grouping_target.
1689 */
1690 have_grouping = (parse->groupClause || parse->groupingSets ||
1691 parse->hasAggs || root->hasHavingQual);
1692 if (have_grouping)
1693 {
1694 scanjoin_target = make_group_input_target(root, final_target);
1695 scanjoin_target_parallel_safe =
1696 is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1697 }
1698 else
1699 {
1700 scanjoin_target = grouping_target;
1701 scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1702 }
1703
1704 /*
1705 * If there are any SRFs in the targetlist, we must separate each of
1706 * these PathTargets into SRF-computing and SRF-free targets. Replace
1707 * each of the named targets with a SRF-free version, and remember the
1708 * list of additional projection steps we need to add afterwards.
1709 */
1710 if (parse->hasTargetSRFs)
1711 {
1712 /* final_target doesn't recompute any SRFs in sort_input_target */
1713 split_pathtarget_at_srfs(root, final_target, sort_input_target,
1714 &final_targets,
1715 &final_targets_contain_srfs);
1716 final_target = linitial_node(PathTarget, final_targets);
1717 Assert(!linitial_int(final_targets_contain_srfs));
1718 /* likewise for sort_input_target vs. grouping_target */
1719 split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1720 &sort_input_targets,
1721 &sort_input_targets_contain_srfs);
1722 sort_input_target = linitial_node(PathTarget, sort_input_targets);
1723 Assert(!linitial_int(sort_input_targets_contain_srfs));
1724 /* likewise for grouping_target vs. scanjoin_target */
1725 split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1726 &grouping_targets,
1727 &grouping_targets_contain_srfs);
1728 grouping_target = linitial_node(PathTarget, grouping_targets);
1729 Assert(!linitial_int(grouping_targets_contain_srfs));
1730 /* scanjoin_target will not have any SRFs precomputed for it */
1731 split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1732 &scanjoin_targets,
1733 &scanjoin_targets_contain_srfs);
1734 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1735 Assert(!linitial_int(scanjoin_targets_contain_srfs));
1736 }
1737 else
1738 {
1739 /* initialize lists; for most of these, dummy values are OK */
1740 final_targets = final_targets_contain_srfs = NIL;
1741 sort_input_targets = sort_input_targets_contain_srfs = NIL;
1742 grouping_targets = grouping_targets_contain_srfs = NIL;
1743 scanjoin_targets = list_make1(scanjoin_target);
1744 scanjoin_targets_contain_srfs = NIL;
1745 }
1746
1747 /* Apply scan/join target. */
1748 scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1749 && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1750 apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1751 scanjoin_targets_contain_srfs,
1752 scanjoin_target_parallel_safe,
1753 scanjoin_target_same_exprs);
1754
1755 /*
1756 * Save the various upper-rel PathTargets we just computed into
1757 * root->upper_targets[]. The core code doesn't use this, but it
1758 * provides a convenient place for extensions to get at the info. For
1759 * consistency, we save all the intermediate targets, even though some
1760 * of the corresponding upperrels might not be needed for this query.
1761 */
1762 root->upper_targets[UPPERREL_FINAL] = final_target;
1763 root->upper_targets[UPPERREL_ORDERED] = final_target;
1764 root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1765 root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1766 root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1767 root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1768
1769 /*
1770 * If we have grouping and/or aggregation, consider ways to implement
1771 * that. We build a new upperrel representing the output of this
1772 * phase.
1773 */
1774 if (have_grouping)
1775 {
1776 current_rel = create_grouping_paths(root,
1777 current_rel,
1778 grouping_target,
1779 grouping_target_parallel_safe,
1780 gset_data);
1781 /* Fix things up if grouping_target contains SRFs */
1782 if (parse->hasTargetSRFs)
1783 adjust_paths_for_srfs(root, current_rel,
1784 grouping_targets,
1785 grouping_targets_contain_srfs);
1786 }
1787
1788 /*
1789 * If we have window functions, consider ways to implement those. We
1790 * build a new upperrel representing the output of this phase.
1791 */
1792 if (activeWindows)
1793 {
1794 current_rel = create_window_paths(root,
1795 current_rel,
1796 grouping_target,
1797 sort_input_target,
1798 sort_input_target_parallel_safe,
1799 wflists,
1800 activeWindows);
1801 /* Fix things up if sort_input_target contains SRFs */
1802 if (parse->hasTargetSRFs)
1803 adjust_paths_for_srfs(root, current_rel,
1804 sort_input_targets,
1805 sort_input_targets_contain_srfs);
1806 }
1807
1808 /*
1809 * If there is a DISTINCT clause, consider ways to implement that. We
1810 * build a new upperrel representing the output of this phase.
1811 */
1812 if (parse->distinctClause)
1813 {
1814 current_rel = create_distinct_paths(root,
1815 current_rel,
1816 sort_input_target);
1817 }
1818 } /* end of if (setOperations) */
1819
1820 /*
1821 * If ORDER BY was given, consider ways to implement that, and generate a
1822 * new upperrel containing only paths that emit the correct ordering and
1823 * project the correct final_target. We can apply the original
1824 * limit_tuples limit in sort costing here, but only if there are no
1825 * postponed SRFs.
1826 */
1827 if (parse->sortClause)
1828 {
1829 current_rel = create_ordered_paths(root,
1830 current_rel,
1831 final_target,
1832 final_target_parallel_safe,
1833 have_postponed_srfs ? -1.0 :
1834 limit_tuples);
1835 /* Fix things up if final_target contains SRFs */
1836 if (parse->hasTargetSRFs)
1837 adjust_paths_for_srfs(root, current_rel,
1838 final_targets,
1839 final_targets_contain_srfs);
1840 }
1841
1842 /*
1843 * Now we are prepared to build the final-output upperrel.
1844 */
1845 final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1846
1847 /*
1848 * If the input rel is marked consider_parallel and there's nothing that's
1849 * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1850 * consider_parallel as well. Note that if the query has rowMarks or is
1851 * not a SELECT, consider_parallel will be false for every relation in the
1852 * query.
1853 */
1854 if (current_rel->consider_parallel &&
1855 is_parallel_safe(root, parse->limitOffset) &&
1856 is_parallel_safe(root, parse->limitCount))
1857 final_rel->consider_parallel = true;
1858
1859 /*
1860 * If the current_rel belongs to a single FDW, so does the final_rel.
1861 */
1862 final_rel->serverid = current_rel->serverid;
1863 final_rel->userid = current_rel->userid;
1864 final_rel->useridiscurrent = current_rel->useridiscurrent;
1865 final_rel->fdwroutine = current_rel->fdwroutine;
1866
1867 /*
1868 * Generate paths for the final_rel. Insert all surviving paths, with
1869 * LockRows, Limit, and/or ModifyTable steps added if needed.
1870 */
1871 foreach(lc, current_rel->pathlist)
1872 {
1873 Path *path = (Path *) lfirst(lc);
1874
1875 /*
1876 * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1877 * (Note: we intentionally test parse->rowMarks not root->rowMarks
1878 * here. If there are only non-locking rowmarks, they should be
1879 * handled by the ModifyTable node instead. However, root->rowMarks
1880 * is what goes into the LockRows node.)
1881 */
1882 if (parse->rowMarks)
1883 {
1884 path = (Path *) create_lockrows_path(root, final_rel, path,
1885 root->rowMarks,
1887 }
1888
1889 /*
1890 * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1891 */
1892 if (limit_needed(parse))
1893 {
1894 path = (Path *) create_limit_path(root, final_rel, path,
1895 parse->limitOffset,
1896 parse->limitCount,
1897 parse->limitOption,
1898 offset_est, count_est);
1899 }
1900
1901 /*
1902 * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1903 */
1904 if (parse->commandType != CMD_SELECT)
1905 {
1906 Index rootRelation;
1907 List *resultRelations = NIL;
1908 List *updateColnosLists = NIL;
1909 List *withCheckOptionLists = NIL;
1910 List *returningLists = NIL;
1911 List *mergeActionLists = NIL;
1912 List *mergeJoinConditions = NIL;
1913 List *rowMarks;
1914
1915 if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1916 {
1917 /* Inherited UPDATE/DELETE/MERGE */
1918 RelOptInfo *top_result_rel = find_base_rel(root,
1919 parse->resultRelation);
1920 int resultRelation = -1;
1921
1922 /* Pass the root result rel forward to the executor. */
1923 rootRelation = parse->resultRelation;
1924
1925 /* Add only leaf children to ModifyTable. */
1926 while ((resultRelation = bms_next_member(root->leaf_result_relids,
1927 resultRelation)) >= 0)
1928 {
1929 RelOptInfo *this_result_rel = find_base_rel(root,
1930 resultRelation);
1931
1932 /*
1933 * Also exclude any leaf rels that have turned dummy since
1934 * being added to the list, for example, by being excluded
1935 * by constraint exclusion.
1936 */
1937 if (IS_DUMMY_REL(this_result_rel))
1938 continue;
1939
1940 /* Build per-target-rel lists needed by ModifyTable */
1941 resultRelations = lappend_int(resultRelations,
1942 resultRelation);
1943 if (parse->commandType == CMD_UPDATE)
1944 {
1945 List *update_colnos = root->update_colnos;
1946
1947 if (this_result_rel != top_result_rel)
1948 update_colnos =
1950 update_colnos,
1951 this_result_rel->relid,
1952 top_result_rel->relid);
1953 updateColnosLists = lappend(updateColnosLists,
1954 update_colnos);
1955 }
1956 if (parse->withCheckOptions)
1957 {
1958 List *withCheckOptions = parse->withCheckOptions;
1959
1960 if (this_result_rel != top_result_rel)
1961 withCheckOptions = (List *)
1963 (Node *) withCheckOptions,
1964 this_result_rel,
1965 top_result_rel);
1966 withCheckOptionLists = lappend(withCheckOptionLists,
1967 withCheckOptions);
1968 }
1969 if (parse->returningList)
1970 {
1971 List *returningList = parse->returningList;
1972
1973 if (this_result_rel != top_result_rel)
1974 returningList = (List *)
1976 (Node *) returningList,
1977 this_result_rel,
1978 top_result_rel);
1979 returningLists = lappend(returningLists,
1980 returningList);
1981 }
1982 if (parse->mergeActionList)
1983 {
1984 ListCell *l;
1985 List *mergeActionList = NIL;
1986
1987 /*
1988 * Copy MergeActions and translate stuff that
1989 * references attribute numbers.
1990 */
1991 foreach(l, parse->mergeActionList)
1992 {
1994 *leaf_action = copyObject(action);
1995
1996 leaf_action->qual =
1998 (Node *) action->qual,
1999 this_result_rel,
2000 top_result_rel);
2001 leaf_action->targetList = (List *)
2003 (Node *) action->targetList,
2004 this_result_rel,
2005 top_result_rel);
2006 if (leaf_action->commandType == CMD_UPDATE)
2007 leaf_action->updateColnos =
2009 action->updateColnos,
2010 this_result_rel->relid,
2011 top_result_rel->relid);
2012 mergeActionList = lappend(mergeActionList,
2013 leaf_action);
2014 }
2015
2016 mergeActionLists = lappend(mergeActionLists,
2017 mergeActionList);
2018 }
2019 if (parse->commandType == CMD_MERGE)
2020 {
2021 Node *mergeJoinCondition = parse->mergeJoinCondition;
2022
2023 if (this_result_rel != top_result_rel)
2024 mergeJoinCondition =
2026 mergeJoinCondition,
2027 this_result_rel,
2028 top_result_rel);
2029 mergeJoinConditions = lappend(mergeJoinConditions,
2030 mergeJoinCondition);
2031 }
2032 }
2033
2034 if (resultRelations == NIL)
2035 {
2036 /*
2037 * We managed to exclude every child rel, so generate a
2038 * dummy one-relation plan using info for the top target
2039 * rel (even though that may not be a leaf target).
2040 * Although it's clear that no data will be updated or
2041 * deleted, we still need to have a ModifyTable node so
2042 * that any statement triggers will be executed. (This
2043 * could be cleaner if we fixed nodeModifyTable.c to allow
2044 * zero target relations, but that probably wouldn't be a
2045 * net win.)
2046 */
2047 resultRelations = list_make1_int(parse->resultRelation);
2048 if (parse->commandType == CMD_UPDATE)
2049 updateColnosLists = list_make1(root->update_colnos);
2050 if (parse->withCheckOptions)
2051 withCheckOptionLists = list_make1(parse->withCheckOptions);
2052 if (parse->returningList)
2053 returningLists = list_make1(parse->returningList);
2054 if (parse->mergeActionList)
2055 mergeActionLists = list_make1(parse->mergeActionList);
2056 if (parse->commandType == CMD_MERGE)
2057 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2058 }
2059 }
2060 else
2061 {
2062 /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2063 rootRelation = 0; /* there's no separate root rel */
2064 resultRelations = list_make1_int(parse->resultRelation);
2065 if (parse->commandType == CMD_UPDATE)
2066 updateColnosLists = list_make1(root->update_colnos);
2067 if (parse->withCheckOptions)
2068 withCheckOptionLists = list_make1(parse->withCheckOptions);
2069 if (parse->returningList)
2070 returningLists = list_make1(parse->returningList);
2071 if (parse->mergeActionList)
2072 mergeActionLists = list_make1(parse->mergeActionList);
2073 if (parse->commandType == CMD_MERGE)
2074 mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2075 }
2076
2077 /*
2078 * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2079 * will have dealt with fetching non-locked marked rows, else we
2080 * need to have ModifyTable do that.
2081 */
2082 if (parse->rowMarks)
2083 rowMarks = NIL;
2084 else
2085 rowMarks = root->rowMarks;
2086
2087 path = (Path *)
2088 create_modifytable_path(root, final_rel,
2089 path,
2090 parse->commandType,
2091 parse->canSetTag,
2092 parse->resultRelation,
2093 rootRelation,
2094 root->partColsUpdated,
2095 resultRelations,
2096 updateColnosLists,
2097 withCheckOptionLists,
2098 returningLists,
2099 rowMarks,
2100 parse->onConflict,
2101 mergeActionLists,
2102 mergeJoinConditions,
2104 }
2105
2106 /* And shove it into final_rel */
2107 add_path(final_rel, path);
2108 }
2109
2110 /*
2111 * Generate partial paths for final_rel, too, if outer query levels might
2112 * be able to make use of them.
2113 */
2114 if (final_rel->consider_parallel && root->query_level > 1 &&
2116 {
2117 Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2118 foreach(lc, current_rel->partial_pathlist)
2119 {
2120 Path *partial_path = (Path *) lfirst(lc);
2121
2122 add_partial_path(final_rel, partial_path);
2123 }
2124 }
2125
2127 extra.limit_tuples = limit_tuples;
2128 extra.count_est = count_est;
2129 extra.offset_est = offset_est;
2130
2131 /*
2132 * If there is an FDW that's responsible for all baserels of the query,
2133 * let it consider adding ForeignPaths.
2134 */
2135 if (final_rel->fdwroutine &&
2136 final_rel->fdwroutine->GetForeignUpperPaths)
2137 final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2138 current_rel, final_rel,
2139 &extra);
2140
2141 /* Let extensions possibly add some more paths */
2143 (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2144 current_rel, final_rel, &extra);
2145
2146 /* Note: currently, we leave it to callers to do set_cheapest() */
2147}
2148
2149/*
2150 * Do preprocessing for groupingSets clause and related data. This handles the
2151 * preliminary steps of expanding the grouping sets, organizing them into lists
2152 * of rollups, and preparing annotations which will later be filled in with
2153 * size estimates.
2154 */
2155static grouping_sets_data *
2157{
2158 Query *parse = root->parse;
2159 List *sets;
2160 int maxref = 0;
2161 ListCell *lc_set;
2163
2164 parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
2165
2166 gd->any_hashable = false;
2167 gd->unhashable_refs = NULL;
2168 gd->unsortable_refs = NULL;
2169 gd->unsortable_sets = NIL;
2170
2171 /*
2172 * We don't currently make any attempt to optimize the groupClause when
2173 * there are grouping sets, so just duplicate it in processed_groupClause.
2174 */
2175 root->processed_groupClause = parse->groupClause;
2176
2177 if (parse->groupClause)
2178 {
2179 ListCell *lc;
2180
2181 foreach(lc, parse->groupClause)
2182 {
2184 Index ref = gc->tleSortGroupRef;
2185
2186 if (ref > maxref)
2187 maxref = ref;
2188
2189 if (!gc->hashable)
2191
2192 if (!OidIsValid(gc->sortop))
2194 }
2195 }
2196
2197 /* Allocate workspace array for remapping */
2198 gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2199
2200 /*
2201 * If we have any unsortable sets, we must extract them before trying to
2202 * prepare rollups. Unsortable sets don't go through
2203 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2204 * here.
2205 */
2206 if (!bms_is_empty(gd->unsortable_refs))
2207 {
2208 List *sortable_sets = NIL;
2209 ListCell *lc;
2210
2211 foreach(lc, parse->groupingSets)
2212 {
2213 List *gset = (List *) lfirst(lc);
2214
2215 if (bms_overlap_list(gd->unsortable_refs, gset))
2216 {
2218
2219 gs->set = gset;
2221
2222 /*
2223 * We must enforce here that an unsortable set is hashable;
2224 * later code assumes this. Parse analysis only checks that
2225 * every individual column is either hashable or sortable.
2226 *
2227 * Note that passing this test doesn't guarantee we can
2228 * generate a plan; there might be other showstoppers.
2229 */
2230 if (bms_overlap_list(gd->unhashable_refs, gset))
2231 ereport(ERROR,
2232 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2233 errmsg("could not implement GROUP BY"),
2234 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2235 }
2236 else
2237 sortable_sets = lappend(sortable_sets, gset);
2238 }
2239
2240 if (sortable_sets)
2241 sets = extract_rollup_sets(sortable_sets);
2242 else
2243 sets = NIL;
2244 }
2245 else
2246 sets = extract_rollup_sets(parse->groupingSets);
2247
2248 foreach(lc_set, sets)
2249 {
2250 List *current_sets = (List *) lfirst(lc_set);
2251 RollupData *rollup = makeNode(RollupData);
2252 GroupingSetData *gs;
2253
2254 /*
2255 * Reorder the current list of grouping sets into correct prefix
2256 * order. If only one aggregation pass is needed, try to make the
2257 * list match the ORDER BY clause; if more than one pass is needed, we
2258 * don't bother with that.
2259 *
2260 * Note that this reorders the sets from smallest-member-first to
2261 * largest-member-first, and applies the GroupingSetData annotations,
2262 * though the data will be filled in later.
2263 */
2264 current_sets = reorder_grouping_sets(current_sets,
2265 (list_length(sets) == 1
2266 ? parse->sortClause
2267 : NIL));
2268
2269 /*
2270 * Get the initial (and therefore largest) grouping set.
2271 */
2272 gs = linitial_node(GroupingSetData, current_sets);
2273
2274 /*
2275 * Order the groupClause appropriately. If the first grouping set is
2276 * empty, then the groupClause must also be empty; otherwise we have
2277 * to force the groupClause to match that grouping set's order.
2278 *
2279 * (The first grouping set can be empty even though parse->groupClause
2280 * is not empty only if all non-empty grouping sets are unsortable.
2281 * The groupClauses for hashed grouping sets are built later on.)
2282 */
2283 if (gs->set)
2285 else
2286 rollup->groupClause = NIL;
2287
2288 /*
2289 * Is it hashable? We pretend empty sets are hashable even though we
2290 * actually force them not to be hashed later. But don't bother if
2291 * there's nothing but empty sets (since in that case we can't hash
2292 * anything).
2293 */
2294 if (gs->set &&
2296 {
2297 rollup->hashable = true;
2298 gd->any_hashable = true;
2299 }
2300
2301 /*
2302 * Now that we've pinned down an order for the groupClause for this
2303 * list of grouping sets, we need to remap the entries in the grouping
2304 * sets from sortgrouprefs to plain indices (0-based) into the
2305 * groupClause for this collection of grouping sets. We keep the
2306 * original form for later use, though.
2307 */
2308 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2309 current_sets,
2311 rollup->gsets_data = current_sets;
2312
2313 gd->rollups = lappend(gd->rollups, rollup);
2314 }
2315
2316 if (gd->unsortable_sets)
2317 {
2318 /*
2319 * We have not yet pinned down a groupclause for this, but we will
2320 * need index-based lists for estimation purposes. Construct
2321 * hash_sets_idx based on the entire original groupclause for now.
2322 */
2323 gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2324 gd->unsortable_sets,
2326 gd->any_hashable = true;
2327 }
2328
2329 return gd;
2330}
2331
2332/*
2333 * Given a groupclause and a list of GroupingSetData, return equivalent sets
2334 * (without annotation) mapped to indexes into the given groupclause.
2335 */
2336static List *
2338 List *gsets,
2339 int *tleref_to_colnum_map)
2340{
2341 int ref = 0;
2342 List *result = NIL;
2343 ListCell *lc;
2344
2345 foreach(lc, groupClause)
2346 {
2348
2349 tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2350 }
2351
2352 foreach(lc, gsets)
2353 {
2354 List *set = NIL;
2355 ListCell *lc2;
2357
2358 foreach(lc2, gs->set)
2359 {
2360 set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2361 }
2362
2363 result = lappend(result, set);
2364 }
2365
2366 return result;
2367}
2368
2369
2370/*
2371 * preprocess_rowmarks - set up PlanRowMarks if needed
2372 */
2373static void
2375{
2376 Query *parse = root->parse;
2377 Bitmapset *rels;
2378 List *prowmarks;
2379 ListCell *l;
2380 int i;
2381
2382 if (parse->rowMarks)
2383 {
2384 /*
2385 * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2386 * grouping, since grouping renders a reference to individual tuple
2387 * CTIDs invalid. This is also checked at parse time, but that's
2388 * insufficient because of rule substitution, query pullup, etc.
2389 */
2391 parse->rowMarks)->strength);
2392 }
2393 else
2394 {
2395 /*
2396 * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2397 * UPDATE/SHARE.
2398 */
2399 if (parse->commandType != CMD_UPDATE &&
2400 parse->commandType != CMD_DELETE &&
2401 parse->commandType != CMD_MERGE)
2402 return;
2403 }
2404
2405 /*
2406 * We need to have rowmarks for all base relations except the target. We
2407 * make a bitmapset of all base rels and then remove the items we don't
2408 * need or have FOR [KEY] UPDATE/SHARE marks for.
2409 */
2410 rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2411 if (parse->resultRelation)
2412 rels = bms_del_member(rels, parse->resultRelation);
2413
2414 /*
2415 * Convert RowMarkClauses to PlanRowMark representation.
2416 */
2417 prowmarks = NIL;
2418 foreach(l, parse->rowMarks)
2419 {
2421 RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2422 PlanRowMark *newrc;
2423
2424 /*
2425 * Currently, it is syntactically impossible to have FOR UPDATE et al
2426 * applied to an update/delete target rel. If that ever becomes
2427 * possible, we should drop the target from the PlanRowMark list.
2428 */
2429 Assert(rc->rti != parse->resultRelation);
2430
2431 /*
2432 * Ignore RowMarkClauses for subqueries; they aren't real tables and
2433 * can't support true locking. Subqueries that got flattened into the
2434 * main query should be ignored completely. Any that didn't will get
2435 * ROW_MARK_COPY items in the next loop.
2436 */
2437 if (rte->rtekind != RTE_RELATION)
2438 continue;
2439
2440 rels = bms_del_member(rels, rc->rti);
2441
2442 newrc = makeNode(PlanRowMark);
2443 newrc->rti = newrc->prti = rc->rti;
2444 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2445 newrc->markType = select_rowmark_type(rte, rc->strength);
2446 newrc->allMarkTypes = (1 << newrc->markType);
2447 newrc->strength = rc->strength;
2448 newrc->waitPolicy = rc->waitPolicy;
2449 newrc->isParent = false;
2450
2451 prowmarks = lappend(prowmarks, newrc);
2452 }
2453
2454 /*
2455 * Now, add rowmarks for any non-target, non-locked base relations.
2456 */
2457 i = 0;
2458 foreach(l, parse->rtable)
2459 {
2461 PlanRowMark *newrc;
2462
2463 i++;
2464 if (!bms_is_member(i, rels))
2465 continue;
2466
2467 newrc = makeNode(PlanRowMark);
2468 newrc->rti = newrc->prti = i;
2469 newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2470 newrc->markType = select_rowmark_type(rte, LCS_NONE);
2471 newrc->allMarkTypes = (1 << newrc->markType);
2472 newrc->strength = LCS_NONE;
2473 newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2474 newrc->isParent = false;
2475
2476 prowmarks = lappend(prowmarks, newrc);
2477 }
2478
2479 root->rowMarks = prowmarks;
2480}
2481
2482/*
2483 * Select RowMarkType to use for a given table
2484 */
2487{
2488 if (rte->rtekind != RTE_RELATION)
2489 {
2490 /* If it's not a table at all, use ROW_MARK_COPY */
2491 return ROW_MARK_COPY;
2492 }
2493 else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2494 {
2495 /* Let the FDW select the rowmark type, if it wants to */
2496 FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2497
2498 if (fdwroutine->GetForeignRowMarkType != NULL)
2499 return fdwroutine->GetForeignRowMarkType(rte, strength);
2500 /* Otherwise, use ROW_MARK_COPY by default */
2501 return ROW_MARK_COPY;
2502 }
2503 else
2504 {
2505 /* Regular table, apply the appropriate lock type */
2506 switch (strength)
2507 {
2508 case LCS_NONE:
2509
2510 /*
2511 * We don't need a tuple lock, only the ability to re-fetch
2512 * the row.
2513 */
2514 return ROW_MARK_REFERENCE;
2515 break;
2516 case LCS_FORKEYSHARE:
2517 return ROW_MARK_KEYSHARE;
2518 break;
2519 case LCS_FORSHARE:
2520 return ROW_MARK_SHARE;
2521 break;
2522 case LCS_FORNOKEYUPDATE:
2524 break;
2525 case LCS_FORUPDATE:
2526 return ROW_MARK_EXCLUSIVE;
2527 break;
2528 }
2529 elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2530 return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2531 }
2532}
2533
2534/*
2535 * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2536 *
2537 * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2538 * results back in *count_est and *offset_est. These variables are set to
2539 * 0 if the corresponding clause is not present, and -1 if it's present
2540 * but we couldn't estimate the value for it. (The "0" convention is OK
2541 * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2542 * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2543 * usual practice of never estimating less than one row.) These values will
2544 * be passed to create_limit_path, which see if you change this code.
2545 *
2546 * The return value is the suitably adjusted tuple_fraction to use for
2547 * planning the query. This adjustment is not overridable, since it reflects
2548 * plan actions that grouping_planner() will certainly take, not assumptions
2549 * about context.
2550 */
2551static double
2552preprocess_limit(PlannerInfo *root, double tuple_fraction,
2553 int64 *offset_est, int64 *count_est)
2554{
2555 Query *parse = root->parse;
2556 Node *est;
2557 double limit_fraction;
2558
2559 /* Should not be called unless LIMIT or OFFSET */
2560 Assert(parse->limitCount || parse->limitOffset);
2561
2562 /*
2563 * Try to obtain the clause values. We use estimate_expression_value
2564 * primarily because it can sometimes do something useful with Params.
2565 */
2566 if (parse->limitCount)
2567 {
2568 est = estimate_expression_value(root, parse->limitCount);
2569 if (est && IsA(est, Const))
2570 {
2571 if (((Const *) est)->constisnull)
2572 {
2573 /* NULL indicates LIMIT ALL, ie, no limit */
2574 *count_est = 0; /* treat as not present */
2575 }
2576 else
2577 {
2578 *count_est = DatumGetInt64(((Const *) est)->constvalue);
2579 if (*count_est <= 0)
2580 *count_est = 1; /* force to at least 1 */
2581 }
2582 }
2583 else
2584 *count_est = -1; /* can't estimate */
2585 }
2586 else
2587 *count_est = 0; /* not present */
2588
2589 if (parse->limitOffset)
2590 {
2591 est = estimate_expression_value(root, parse->limitOffset);
2592 if (est && IsA(est, Const))
2593 {
2594 if (((Const *) est)->constisnull)
2595 {
2596 /* Treat NULL as no offset; the executor will too */
2597 *offset_est = 0; /* treat as not present */
2598 }
2599 else
2600 {
2601 *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2602 if (*offset_est < 0)
2603 *offset_est = 0; /* treat as not present */
2604 }
2605 }
2606 else
2607 *offset_est = -1; /* can't estimate */
2608 }
2609 else
2610 *offset_est = 0; /* not present */
2611
2612 if (*count_est != 0)
2613 {
2614 /*
2615 * A LIMIT clause limits the absolute number of tuples returned.
2616 * However, if it's not a constant LIMIT then we have to guess; for
2617 * lack of a better idea, assume 10% of the plan's result is wanted.
2618 */
2619 if (*count_est < 0 || *offset_est < 0)
2620 {
2621 /* LIMIT or OFFSET is an expression ... punt ... */
2622 limit_fraction = 0.10;
2623 }
2624 else
2625 {
2626 /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2627 limit_fraction = (double) *count_est + (double) *offset_est;
2628 }
2629
2630 /*
2631 * If we have absolute limits from both caller and LIMIT, use the
2632 * smaller value; likewise if they are both fractional. If one is
2633 * fractional and the other absolute, we can't easily determine which
2634 * is smaller, but we use the heuristic that the absolute will usually
2635 * be smaller.
2636 */
2637 if (tuple_fraction >= 1.0)
2638 {
2639 if (limit_fraction >= 1.0)
2640 {
2641 /* both absolute */
2642 tuple_fraction = Min(tuple_fraction, limit_fraction);
2643 }
2644 else
2645 {
2646 /* caller absolute, limit fractional; use caller's value */
2647 }
2648 }
2649 else if (tuple_fraction > 0.0)
2650 {
2651 if (limit_fraction >= 1.0)
2652 {
2653 /* caller fractional, limit absolute; use limit */
2654 tuple_fraction = limit_fraction;
2655 }
2656 else
2657 {
2658 /* both fractional */
2659 tuple_fraction = Min(tuple_fraction, limit_fraction);
2660 }
2661 }
2662 else
2663 {
2664 /* no info from caller, just use limit */
2665 tuple_fraction = limit_fraction;
2666 }
2667 }
2668 else if (*offset_est != 0 && tuple_fraction > 0.0)
2669 {
2670 /*
2671 * We have an OFFSET but no LIMIT. This acts entirely differently
2672 * from the LIMIT case: here, we need to increase rather than decrease
2673 * the caller's tuple_fraction, because the OFFSET acts to cause more
2674 * tuples to be fetched instead of fewer. This only matters if we got
2675 * a tuple_fraction > 0, however.
2676 *
2677 * As above, use 10% if OFFSET is present but unestimatable.
2678 */
2679 if (*offset_est < 0)
2680 limit_fraction = 0.10;
2681 else
2682 limit_fraction = (double) *offset_est;
2683
2684 /*
2685 * If we have absolute counts from both caller and OFFSET, add them
2686 * together; likewise if they are both fractional. If one is
2687 * fractional and the other absolute, we want to take the larger, and
2688 * we heuristically assume that's the fractional one.
2689 */
2690 if (tuple_fraction >= 1.0)
2691 {
2692 if (limit_fraction >= 1.0)
2693 {
2694 /* both absolute, so add them together */
2695 tuple_fraction += limit_fraction;
2696 }
2697 else
2698 {
2699 /* caller absolute, limit fractional; use limit */
2700 tuple_fraction = limit_fraction;
2701 }
2702 }
2703 else
2704 {
2705 if (limit_fraction >= 1.0)
2706 {
2707 /* caller fractional, limit absolute; use caller's value */
2708 }
2709 else
2710 {
2711 /* both fractional, so add them together */
2712 tuple_fraction += limit_fraction;
2713 if (tuple_fraction >= 1.0)
2714 tuple_fraction = 0.0; /* assume fetch all */
2715 }
2716 }
2717 }
2718
2719 return tuple_fraction;
2720}
2721
2722/*
2723 * limit_needed - do we actually need a Limit plan node?
2724 *
2725 * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2726 * a Limit node. This is worth checking for because "OFFSET 0" is a common
2727 * locution for an optimization fence. (Because other places in the planner
2728 * merely check whether parse->limitOffset isn't NULL, it will still work as
2729 * an optimization fence --- we're just suppressing unnecessary run-time
2730 * overhead.)
2731 *
2732 * This might look like it could be merged into preprocess_limit, but there's
2733 * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2734 * in preprocess_limit it's good enough to consider estimated values.
2735 */
2736bool
2738{
2739 Node *node;
2740
2741 node = parse->limitCount;
2742 if (node)
2743 {
2744 if (IsA(node, Const))
2745 {
2746 /* NULL indicates LIMIT ALL, ie, no limit */
2747 if (!((Const *) node)->constisnull)
2748 return true; /* LIMIT with a constant value */
2749 }
2750 else
2751 return true; /* non-constant LIMIT */
2752 }
2753
2754 node = parse->limitOffset;
2755 if (node)
2756 {
2757 if (IsA(node, Const))
2758 {
2759 /* Treat NULL as no offset; the executor would too */
2760 if (!((Const *) node)->constisnull)
2761 {
2762 int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2763
2764 if (offset != 0)
2765 return true; /* OFFSET with a nonzero value */
2766 }
2767 }
2768 else
2769 return true; /* non-constant OFFSET */
2770 }
2771
2772 return false; /* don't need a Limit plan node */
2773}
2774
2775/*
2776 * preprocess_groupclause - do preparatory work on GROUP BY clause
2777 *
2778 * The idea here is to adjust the ordering of the GROUP BY elements
2779 * (which in itself is semantically insignificant) to match ORDER BY,
2780 * thereby allowing a single sort operation to both implement the ORDER BY
2781 * requirement and set up for a Unique step that implements GROUP BY.
2782 * We also consider partial match between GROUP BY and ORDER BY elements,
2783 * which could allow to implement ORDER BY using the incremental sort.
2784 *
2785 * We also consider other orderings of the GROUP BY elements, which could
2786 * match the sort ordering of other possible plans (eg an indexscan) and
2787 * thereby reduce cost. This is implemented during the generation of grouping
2788 * paths. See get_useful_group_keys_orderings() for details.
2789 *
2790 * Note: we need no comparable processing of the distinctClause because
2791 * the parser already enforced that that matches ORDER BY.
2792 *
2793 * Note: we return a fresh List, but its elements are the same
2794 * SortGroupClauses appearing in parse->groupClause. This is important
2795 * because later processing may modify the processed_groupClause list.
2796 *
2797 * For grouping sets, the order of items is instead forced to agree with that
2798 * of the grouping set (and items not in the grouping set are skipped). The
2799 * work of sorting the order of grouping set elements to match the ORDER BY if
2800 * possible is done elsewhere.
2801 */
2802static List *
2804{
2805 Query *parse = root->parse;
2806 List *new_groupclause = NIL;
2807 ListCell *sl;
2808 ListCell *gl;
2809
2810 /* For grouping sets, we need to force the ordering */
2811 if (force)
2812 {
2813 foreach(sl, force)
2814 {
2815 Index ref = lfirst_int(sl);
2816 SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2817
2818 new_groupclause = lappend(new_groupclause, cl);
2819 }
2820
2821 return new_groupclause;
2822 }
2823
2824 /* If no ORDER BY, nothing useful to do here */
2825 if (parse->sortClause == NIL)
2826 return list_copy(parse->groupClause);
2827
2828 /*
2829 * Scan the ORDER BY clause and construct a list of matching GROUP BY
2830 * items, but only as far as we can make a matching prefix.
2831 *
2832 * This code assumes that the sortClause contains no duplicate items.
2833 */
2834 foreach(sl, parse->sortClause)
2835 {
2837
2838 foreach(gl, parse->groupClause)
2839 {
2841
2842 if (equal(gc, sc))
2843 {
2844 new_groupclause = lappend(new_groupclause, gc);
2845 break;
2846 }
2847 }
2848 if (gl == NULL)
2849 break; /* no match, so stop scanning */
2850 }
2851
2852
2853 /* If no match at all, no point in reordering GROUP BY */
2854 if (new_groupclause == NIL)
2855 return list_copy(parse->groupClause);
2856
2857 /*
2858 * Add any remaining GROUP BY items to the new list. We don't require a
2859 * complete match, because even partial match allows ORDER BY to be
2860 * implemented using incremental sort. Also, give up if there are any
2861 * non-sortable GROUP BY items, since then there's no hope anyway.
2862 */
2863 foreach(gl, parse->groupClause)
2864 {
2866
2867 if (list_member_ptr(new_groupclause, gc))
2868 continue; /* it matched an ORDER BY item */
2869 if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2870 return list_copy(parse->groupClause);
2871 new_groupclause = lappend(new_groupclause, gc);
2872 }
2873
2874 /* Success --- install the rearranged GROUP BY list */
2875 Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2876 return new_groupclause;
2877}
2878
2879/*
2880 * Extract lists of grouping sets that can be implemented using a single
2881 * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2882 *
2883 * Input must be sorted with smallest sets first. Result has each sublist
2884 * sorted with smallest sets first.
2885 *
2886 * We want to produce the absolute minimum possible number of lists here to
2887 * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2888 * of finding the minimal partition of a partially-ordered set into chains
2889 * (which is what we need, taking the list of grouping sets as a poset ordered
2890 * by set inclusion) can be mapped to the problem of finding the maximum
2891 * cardinality matching on a bipartite graph, which is solvable in polynomial
2892 * time with a worst case of no worse than O(n^2.5) and usually much
2893 * better. Since our N is at most 4096, we don't need to consider fallbacks to
2894 * heuristic or approximate methods. (Planning time for a 12-d cube is under
2895 * half a second on my modest system even with optimization off and assertions
2896 * on.)
2897 */
2898static List *
2900{
2901 int num_sets_raw = list_length(groupingSets);
2902 int num_empty = 0;
2903 int num_sets = 0; /* distinct sets */
2904 int num_chains = 0;
2905 List *result = NIL;
2906 List **results;
2907 List **orig_sets;
2908 Bitmapset **set_masks;
2909 int *chains;
2910 short **adjacency;
2911 short *adjacency_buf;
2913 int i;
2914 int j;
2915 int j_size;
2916 ListCell *lc1 = list_head(groupingSets);
2917 ListCell *lc;
2918
2919 /*
2920 * Start by stripping out empty sets. The algorithm doesn't require this,
2921 * but the planner currently needs all empty sets to be returned in the
2922 * first list, so we strip them here and add them back after.
2923 */
2924 while (lc1 && lfirst(lc1) == NIL)
2925 {
2926 ++num_empty;
2927 lc1 = lnext(groupingSets, lc1);
2928 }
2929
2930 /* bail out now if it turns out that all we had were empty sets. */
2931 if (!lc1)
2932 return list_make1(groupingSets);
2933
2934 /*----------
2935 * We don't strictly need to remove duplicate sets here, but if we don't,
2936 * they tend to become scattered through the result, which is a bit
2937 * confusing (and irritating if we ever decide to optimize them out).
2938 * So we remove them here and add them back after.
2939 *
2940 * For each non-duplicate set, we fill in the following:
2941 *
2942 * orig_sets[i] = list of the original set lists
2943 * set_masks[i] = bitmapset for testing inclusion
2944 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2945 *
2946 * chains[i] will be the result group this set is assigned to.
2947 *
2948 * We index all of these from 1 rather than 0 because it is convenient
2949 * to leave 0 free for the NIL node in the graph algorithm.
2950 *----------
2951 */
2952 orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2953 set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2954 adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2955 adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2956
2957 j_size = 0;
2958 j = 0;
2959 i = 1;
2960
2961 for_each_cell(lc, groupingSets, lc1)
2962 {
2963 List *candidate = (List *) lfirst(lc);
2964 Bitmapset *candidate_set = NULL;
2965 ListCell *lc2;
2966 int dup_of = 0;
2967
2968 foreach(lc2, candidate)
2969 {
2970 candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2971 }
2972
2973 /* we can only be a dup if we're the same length as a previous set */
2974 if (j_size == list_length(candidate))
2975 {
2976 int k;
2977
2978 for (k = j; k < i; ++k)
2979 {
2980 if (bms_equal(set_masks[k], candidate_set))
2981 {
2982 dup_of = k;
2983 break;
2984 }
2985 }
2986 }
2987 else if (j_size < list_length(candidate))
2988 {
2989 j_size = list_length(candidate);
2990 j = i;
2991 }
2992
2993 if (dup_of > 0)
2994 {
2995 orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2996 bms_free(candidate_set);
2997 }
2998 else
2999 {
3000 int k;
3001 int n_adj = 0;
3002
3003 orig_sets[i] = list_make1(candidate);
3004 set_masks[i] = candidate_set;
3005
3006 /* fill in adjacency list; no need to compare equal-size sets */
3007
3008 for (k = j - 1; k > 0; --k)
3009 {
3010 if (bms_is_subset(set_masks[k], candidate_set))
3011 adjacency_buf[++n_adj] = k;
3012 }
3013
3014 if (n_adj > 0)
3015 {
3016 adjacency_buf[0] = n_adj;
3017 adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3018 memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3019 }
3020 else
3021 adjacency[i] = NULL;
3022
3023 ++i;
3024 }
3025 }
3026
3027 num_sets = i - 1;
3028
3029 /*
3030 * Apply the graph matching algorithm to do the work.
3031 */
3032 state = BipartiteMatch(num_sets, num_sets, adjacency);
3033
3034 /*
3035 * Now, the state->pair* fields have the info we need to assign sets to
3036 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3037 * pair_vu[v] = u (both will be true, but we check both so that we can do
3038 * it in one pass)
3039 */
3040 chains = palloc0((num_sets + 1) * sizeof(int));
3041
3042 for (i = 1; i <= num_sets; ++i)
3043 {
3044 int u = state->pair_vu[i];
3045 int v = state->pair_uv[i];
3046
3047 if (u > 0 && u < i)
3048 chains[i] = chains[u];
3049 else if (v > 0 && v < i)
3050 chains[i] = chains[v];
3051 else
3052 chains[i] = ++num_chains;
3053 }
3054
3055 /* build result lists. */
3056 results = palloc0((num_chains + 1) * sizeof(List *));
3057
3058 for (i = 1; i <= num_sets; ++i)
3059 {
3060 int c = chains[i];
3061
3062 Assert(c > 0);
3063
3064 results[c] = list_concat(results[c], orig_sets[i]);
3065 }
3066
3067 /* push any empty sets back on the first list. */
3068 while (num_empty-- > 0)
3069 results[1] = lcons(NIL, results[1]);
3070
3071 /* make result list */
3072 for (i = 1; i <= num_chains; ++i)
3073 result = lappend(result, results[i]);
3074
3075 /*
3076 * Free all the things.
3077 *
3078 * (This is over-fussy for small sets but for large sets we could have
3079 * tied up a nontrivial amount of memory.)
3080 */
3082 pfree(results);
3083 pfree(chains);
3084 for (i = 1; i <= num_sets; ++i)
3085 if (adjacency[i])
3086 pfree(adjacency[i]);
3087 pfree(adjacency);
3088 pfree(adjacency_buf);
3089 pfree(orig_sets);
3090 for (i = 1; i <= num_sets; ++i)
3091 bms_free(set_masks[i]);
3092 pfree(set_masks);
3093
3094 return result;
3095}
3096
3097/*
3098 * Reorder the elements of a list of grouping sets such that they have correct
3099 * prefix relationships. Also inserts the GroupingSetData annotations.
3100 *
3101 * The input must be ordered with smallest sets first; the result is returned
3102 * with largest sets first. Note that the result shares no list substructure
3103 * with the input, so it's safe for the caller to modify it later.
3104 *
3105 * If we're passed in a sortclause, we follow its order of columns to the
3106 * extent possible, to minimize the chance that we add unnecessary sorts.
3107 * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3108 * gets implemented in one pass.)
3109 */
3110static List *
3111reorder_grouping_sets(List *groupingSets, List *sortclause)
3112{
3113 ListCell *lc;
3114 List *previous = NIL;
3115 List *result = NIL;
3116
3117 foreach(lc, groupingSets)
3118 {
3119 List *candidate = (List *) lfirst(lc);
3120 List *new_elems = list_difference_int(candidate, previous);
3122
3123 while (list_length(sortclause) > list_length(previous) &&
3124 new_elems != NIL)
3125 {
3126 SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3127 int ref = sc->tleSortGroupRef;
3128
3129 if (list_member_int(new_elems, ref))
3130 {
3131 previous = lappend_int(previous, ref);
3132 new_elems = list_delete_int(new_elems, ref);
3133 }
3134 else
3135 {
3136 /* diverged from the sortclause; give up on it */
3137 sortclause = NIL;
3138 break;
3139 }
3140 }
3141
3142 previous = list_concat(previous, new_elems);
3143
3144 gs->set = list_copy(previous);
3145 result = lcons(gs, result);
3146 }
3147
3148 list_free(previous);
3149
3150 return result;
3151}
3152
3153/*
3154 * has_volatile_pathkey
3155 * Returns true if any PathKey in 'keys' has an EquivalenceClass
3156 * containing a volatile function. Otherwise returns false.
3157 */
3158static bool
3160{
3161 ListCell *lc;
3162
3163 foreach(lc, keys)
3164 {
3165 PathKey *pathkey = lfirst_node(PathKey, lc);
3166
3167 if (pathkey->pk_eclass->ec_has_volatile)
3168 return true;
3169 }
3170
3171 return false;
3172}
3173
3174/*
3175 * adjust_group_pathkeys_for_groupagg
3176 * Add pathkeys to root->group_pathkeys to reflect the best set of
3177 * pre-ordered input for ordered aggregates.
3178 *
3179 * We define "best" as the pathkeys that suit the largest number of
3180 * aggregate functions. We find these by looking at the first ORDER BY /
3181 * DISTINCT aggregate and take the pathkeys for that before searching for
3182 * other aggregates that require the same or a more strict variation of the
3183 * same pathkeys. We then repeat that process for any remaining aggregates
3184 * with different pathkeys and if we find another set of pathkeys that suits a
3185 * larger number of aggregates then we select those pathkeys instead.
3186 *
3187 * When the best pathkeys are found we also mark each Aggref that can use
3188 * those pathkeys as aggpresorted = true.
3189 *
3190 * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3191 * volatile functions, we never make use of these pathkeys. We want to ensure
3192 * that sorts using volatile functions are done independently in each Aggref
3193 * rather than once at the query level. If we were to allow this then Aggrefs
3194 * with compatible sort orders would all transition their rows in the same
3195 * order if those pathkeys were deemed to be the best pathkeys to sort on.
3196 * Whereas, if some other set of Aggref's pathkeys happened to be deemed
3197 * better pathkeys to sort on, then the volatile function Aggrefs would be
3198 * left to perform their sorts individually. To avoid this inconsistent
3199 * behavior which could make Aggref results depend on what other Aggrefs the
3200 * query contains, we always force Aggrefs with volatile functions to perform
3201 * their own sorts.
3202 */
3203static void
3205{
3206 List *grouppathkeys = root->group_pathkeys;
3207 List *bestpathkeys;
3208 Bitmapset *bestaggs;
3209 Bitmapset *unprocessed_aggs;
3210 ListCell *lc;
3211 int i;
3212
3213 /* Shouldn't be here if there are grouping sets */
3214 Assert(root->parse->groupingSets == NIL);
3215 /* Shouldn't be here unless there are some ordered aggregates */
3216 Assert(root->numOrderedAggs > 0);
3217
3218 /* Do nothing if disabled */
3220 return;
3221
3222 /*
3223 * Make a first pass over all AggInfos to collect a Bitmapset containing
3224 * the indexes of all AggInfos to be processed below.
3225 */
3226 unprocessed_aggs = NULL;
3227 foreach(lc, root->agginfos)
3228 {
3229 AggInfo *agginfo = lfirst_node(AggInfo, lc);
3230 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3231
3232 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3233 continue;
3234
3235 /* Skip unless there's a DISTINCT or ORDER BY clause */
3236 if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3237 continue;
3238
3239 /* Additional safety checks are needed if there's a FILTER clause */
3240 if (aggref->aggfilter != NULL)
3241 {
3242 ListCell *lc2;
3243 bool allow_presort = true;
3244
3245 /*
3246 * When the Aggref has a FILTER clause, it's possible that the
3247 * filter removes rows that cannot be sorted because the
3248 * expression to sort by results in an error during its
3249 * evaluation. This is a problem for presorting as that happens
3250 * before the FILTER, whereas without presorting, the Aggregate
3251 * node will apply the FILTER *before* sorting. So that we never
3252 * try to sort anything that might error, here we aim to skip over
3253 * any Aggrefs with arguments with expressions which, when
3254 * evaluated, could cause an ERROR. Vars and Consts are ok. There
3255 * may be more cases that should be allowed, but more thought
3256 * needs to be given. Err on the side of caution.
3257 */
3258 foreach(lc2, aggref->args)
3259 {
3260 TargetEntry *tle = (TargetEntry *) lfirst(lc2);
3261 Expr *expr = tle->expr;
3262
3263 while (IsA(expr, RelabelType))
3264 expr = (Expr *) (castNode(RelabelType, expr))->arg;
3265
3266 /* Common case, Vars and Consts are ok */
3267 if (IsA(expr, Var) || IsA(expr, Const))
3268 continue;
3269
3270 /* Unsupported. Don't try to presort for this Aggref */
3271 allow_presort = false;
3272 break;
3273 }
3274
3275 /* Skip unsupported Aggrefs */
3276 if (!allow_presort)
3277 continue;
3278 }
3279
3280 unprocessed_aggs = bms_add_member(unprocessed_aggs,
3282 }
3283
3284 /*
3285 * Now process all the unprocessed_aggs to find the best set of pathkeys
3286 * for the given set of aggregates.
3287 *
3288 * On the first outer loop here 'bestaggs' will be empty. We'll populate
3289 * this during the first loop using the pathkeys for the very first
3290 * AggInfo then taking any stronger pathkeys from any other AggInfos with
3291 * a more strict set of compatible pathkeys. Once the outer loop is
3292 * complete, we mark off all the aggregates with compatible pathkeys then
3293 * remove those from the unprocessed_aggs and repeat the process to try to
3294 * find another set of pathkeys that are suitable for a larger number of
3295 * aggregates. The outer loop will stop when there are not enough
3296 * unprocessed aggregates for it to be possible to find a set of pathkeys
3297 * to suit a larger number of aggregates.
3298 */
3299 bestpathkeys = NIL;
3300 bestaggs = NULL;
3301 while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3302 {
3303 Bitmapset *aggindexes = NULL;
3304 List *currpathkeys = NIL;
3305
3306 i = -1;
3307 while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3308 {
3309 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3310 Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3311 List *sortlist;
3312 List *pathkeys;
3313
3314 if (aggref->aggdistinct != NIL)
3315 sortlist = aggref->aggdistinct;
3316 else
3317 sortlist = aggref->aggorder;
3318
3319 pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3320 aggref->args);
3321
3322 /*
3323 * Ignore Aggrefs which have volatile functions in their ORDER BY
3324 * or DISTINCT clause.
3325 */
3326 if (has_volatile_pathkey(pathkeys))
3327 {
3328 unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3329 continue;
3330 }
3331
3332 /*
3333 * When not set yet, take the pathkeys from the first unprocessed
3334 * aggregate.
3335 */
3336 if (currpathkeys == NIL)
3337 {
3338 currpathkeys = pathkeys;
3339
3340 /* include the GROUP BY pathkeys, if they exist */
3341 if (grouppathkeys != NIL)
3342 currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3343 currpathkeys);
3344
3345 /* record that we found pathkeys for this aggregate */
3346 aggindexes = bms_add_member(aggindexes, i);
3347 }
3348 else
3349 {
3350 /* now look for a stronger set of matching pathkeys */
3351
3352 /* include the GROUP BY pathkeys, if they exist */
3353 if (grouppathkeys != NIL)
3354 pathkeys = append_pathkeys(list_copy(grouppathkeys),
3355 pathkeys);
3356
3357 /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3358 switch (compare_pathkeys(currpathkeys, pathkeys))
3359 {
3360 case PATHKEYS_BETTER2:
3361 /* 'pathkeys' are stronger, use these ones instead */
3362 currpathkeys = pathkeys;
3363 /* FALLTHROUGH */
3364
3365 case PATHKEYS_BETTER1:
3366 /* 'pathkeys' are less strict */
3367 /* FALLTHROUGH */
3368
3369 case PATHKEYS_EQUAL:
3370 /* mark this aggregate as covered by 'currpathkeys' */
3371 aggindexes = bms_add_member(aggindexes, i);
3372 break;
3373
3374 case PATHKEYS_DIFFERENT:
3375 break;
3376 }
3377 }
3378 }
3379
3380 /* remove the aggregates that we've just processed */
3381 unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3382
3383 /*
3384 * If this pass included more aggregates than the previous best then
3385 * use these ones as the best set.
3386 */
3387 if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3388 {
3389 bestaggs = aggindexes;
3390 bestpathkeys = currpathkeys;
3391 }
3392 }
3393
3394 /*
3395 * If we found any ordered aggregates, update root->group_pathkeys to add
3396 * the best set of aggregate pathkeys. Note that bestpathkeys includes
3397 * the original GROUP BY pathkeys already.
3398 */
3399 if (bestpathkeys != NIL)
3400 root->group_pathkeys = bestpathkeys;
3401
3402 /*
3403 * Now that we've found the best set of aggregates we can set the
3404 * presorted flag to indicate to the executor that it needn't bother
3405 * performing a sort for these Aggrefs. We're able to do this now as
3406 * there's no chance of a Hash Aggregate plan as create_grouping_paths
3407 * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3408 * of ordered aggregates.
3409 */
3410 i = -1;
3411 while ((i = bms_next_member(bestaggs, i)) >= 0)
3412 {
3413 AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3414
3415 foreach(lc, agginfo->aggrefs)
3416 {
3417 Aggref *aggref = lfirst_node(Aggref, lc);
3418
3419 aggref->aggpresorted = true;
3420 }
3421 }
3422}
3423
3424/*
3425 * Compute query_pathkeys and other pathkeys during plan generation
3426 */
3427static void
3429{
3430 Query *parse = root->parse;
3431 standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3432 List *tlist = root->processed_tlist;
3433 List *activeWindows = qp_extra->activeWindows;
3434
3435 /*
3436 * Calculate pathkeys that represent grouping/ordering and/or ordered
3437 * aggregate requirements.
3438 */
3439 if (qp_extra->gset_data)
3440 {
3441 /*
3442 * With grouping sets, just use the first RollupData's groupClause. We
3443 * don't make any effort to optimize grouping clauses when there are
3444 * grouping sets, nor can we combine aggregate ordering keys with
3445 * grouping.
3446 */
3447 List *rollups = qp_extra->gset_data->rollups;
3448 List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3449
3450 if (grouping_is_sortable(groupClause))
3451 {
3452 bool sortable;
3453
3454 /*
3455 * The groupClause is logically below the grouping step. So if
3456 * there is an RTE entry for the grouping step, we need to remove
3457 * its RT index from the sort expressions before we make PathKeys
3458 * for them.
3459 */
3460 root->group_pathkeys =
3462 &groupClause,
3463 tlist,
3464 false,
3465 parse->hasGroupRTE,
3466 &sortable,
3467 false);
3468 Assert(sortable);
3469 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3470 }
3471 else
3472 {
3473 root->group_pathkeys = NIL;
3474 root->num_groupby_pathkeys = 0;
3475 }
3476 }
3477 else if (parse->groupClause || root->numOrderedAggs > 0)
3478 {
3479 /*
3480 * With a plain GROUP BY list, we can remove any grouping items that
3481 * are proven redundant by EquivalenceClass processing. For example,
3482 * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3483 * especially common cases, but they're nearly free to detect. Note
3484 * that we remove redundant items from processed_groupClause but not
3485 * the original parse->groupClause.
3486 */
3487 bool sortable;
3488
3489 /*
3490 * Convert group clauses into pathkeys. Set the ec_sortref field of
3491 * EquivalenceClass'es if it's not set yet.
3492 */
3493 root->group_pathkeys =
3495 &root->processed_groupClause,
3496 tlist,
3497 true,
3498 false,
3499 &sortable,
3500 true);
3501 if (!sortable)
3502 {
3503 /* Can't sort; no point in considering aggregate ordering either */
3504 root->group_pathkeys = NIL;
3505 root->num_groupby_pathkeys = 0;
3506 }
3507 else
3508 {
3509 root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3510 /* If we have ordered aggs, consider adding onto group_pathkeys */
3511 if (root->numOrderedAggs > 0)
3513 }
3514 }
3515 else
3516 {
3517 root->group_pathkeys = NIL;
3518 root->num_groupby_pathkeys = 0;
3519 }
3520
3521 /* We consider only the first (bottom) window in pathkeys logic */
3522 if (activeWindows != NIL)
3523 {
3524 WindowClause *wc = linitial_node(WindowClause, activeWindows);
3525
3526 root->window_pathkeys = make_pathkeys_for_window(root,
3527 wc,
3528 tlist);
3529 }
3530 else
3531 root->window_pathkeys = NIL;
3532
3533 /*
3534 * As with GROUP BY, we can discard any DISTINCT items that are proven
3535 * redundant by EquivalenceClass processing. The non-redundant list is
3536 * kept in root->processed_distinctClause, leaving the original
3537 * parse->distinctClause alone.
3538 */
3539 if (parse->distinctClause)
3540 {
3541 bool sortable;
3542
3543 /* Make a copy since pathkey processing can modify the list */
3544 root->processed_distinctClause = list_copy(parse->distinctClause);
3545 root->distinct_pathkeys =
3547 &root->processed_distinctClause,
3548 tlist,
3549 true,
3550 false,
3551 &sortable,
3552 false);
3553 if (!sortable)
3554 root->distinct_pathkeys = NIL;
3555 }
3556 else
3557 root->distinct_pathkeys = NIL;
3558
3559 root->sort_pathkeys =
3561 parse->sortClause,
3562 tlist);
3563
3564 /* setting setop_pathkeys might be useful to the union planner */
3565 if (qp_extra->setop != NULL)
3566 {
3567 List *groupClauses;
3568 bool sortable;
3569
3570 groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3571
3572 root->setop_pathkeys =
3574 &groupClauses,
3575 tlist,
3576 false,
3577 false,
3578 &sortable,
3579 false);
3580 if (!sortable)
3581 root->setop_pathkeys = NIL;
3582 }
3583 else
3584 root->setop_pathkeys = NIL;
3585
3586 /*
3587 * Figure out whether we want a sorted result from query_planner.
3588 *
3589 * If we have a sortable GROUP BY clause, then we want a result sorted
3590 * properly for grouping. Otherwise, if we have window functions to
3591 * evaluate, we try to sort for the first window. Otherwise, if there's a
3592 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3593 * we try to produce output that's sufficiently well sorted for the
3594 * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3595 * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3596 * for a set operation which can benefit from presorted results and have a
3597 * sortable targetlist, we want to sort by the target list.
3598 *
3599 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3600 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3601 * that might just leave us failing to exploit an available sort order at
3602 * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3603 * much easier, since we know that the parser ensured that one is a
3604 * superset of the other.
3605 */
3606 if (root->group_pathkeys)
3607 root->query_pathkeys = root->group_pathkeys;
3608 else if (root->window_pathkeys)
3609 root->query_pathkeys = root->window_pathkeys;
3610 else if (list_length(root->distinct_pathkeys) >
3611 list_length(root->sort_pathkeys))
3612 root->query_pathkeys = root->distinct_pathkeys;
3613 else if (root->sort_pathkeys)
3614 root->query_pathkeys = root->sort_pathkeys;
3615 else if (root->setop_pathkeys != NIL)
3616 root->query_pathkeys = root->setop_pathkeys;
3617 else
3618 root->query_pathkeys = NIL;
3619}
3620
3621/*
3622 * Estimate number of groups produced by grouping clauses (1 if not grouping)
3623 *
3624 * path_rows: number of output rows from scan/join step
3625 * gd: grouping sets data including list of grouping sets and their clauses
3626 * target_list: target list containing group clause references
3627 *
3628 * If doing grouping sets, we also annotate the gsets data with the estimates
3629 * for each set and each individual rollup list, with a view to later
3630 * determining whether some combination of them could be hashed instead.
3631 */
3632static double
3634 double path_rows,
3636 List *target_list)
3637{
3638 Query *parse = root->parse;
3639 double dNumGroups;
3640
3641 if (parse->groupClause)
3642 {
3643 List *groupExprs;
3644
3645 if (parse->groupingSets)
3646 {
3647 /* Add up the estimates for each grouping set */
3648 ListCell *lc;
3649
3650 Assert(gd); /* keep Coverity happy */
3651
3652 dNumGroups = 0;
3653
3654 foreach(lc, gd->rollups)
3655 {
3656 RollupData *rollup = lfirst_node(RollupData, lc);
3657 ListCell *lc2;
3658 ListCell *lc3;
3659
3660 groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3661 target_list);
3662
3663 rollup->numGroups = 0.0;
3664
3665 forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3666 {
3667 List *gset = (List *) lfirst(lc2);
3669 double numGroups = estimate_num_groups(root,
3670 groupExprs,
3671 path_rows,
3672 &gset,
3673 NULL);
3674
3675 gs->numGroups = numGroups;
3676 rollup->numGroups += numGroups;
3677 }
3678
3679 dNumGroups += rollup->numGroups;
3680 }
3681
3682 if (gd->hash_sets_idx)
3683 {
3684 ListCell *lc2;
3685
3686 gd->dNumHashGroups = 0;
3687
3688 groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3689 target_list);
3690
3691 forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3692 {
3693 List *gset = (List *) lfirst(lc);
3695 double numGroups = estimate_num_groups(root,
3696 groupExprs,
3697 path_rows,
3698 &gset,
3699 NULL);
3700
3701 gs->numGroups = numGroups;
3702 gd->dNumHashGroups += numGroups;
3703 }
3704
3705 dNumGroups += gd->dNumHashGroups;
3706 }
3707 }
3708 else
3709 {
3710 /* Plain GROUP BY -- estimate based on optimized groupClause */
3711 groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3712 target_list);
3713
3714 dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3715 NULL, NULL);
3716 }
3717 }
3718 else if (parse->groupingSets)
3719 {
3720 /* Empty grouping sets ... one result row for each one */
3721 dNumGroups = list_length(parse->groupingSets);
3722 }
3723 else if (parse->hasAggs || root->hasHavingQual)
3724 {
3725 /* Plain aggregation, one result row */
3726 dNumGroups = 1;
3727 }
3728 else
3729 {
3730 /* Not grouping */
3731 dNumGroups = 1;
3732 }
3733
3734 return dNumGroups;
3735}
3736
3737/*
3738 * create_grouping_paths
3739 *
3740 * Build a new upperrel containing Paths for grouping and/or aggregation.
3741 * Along the way, we also build an upperrel for Paths which are partially
3742 * grouped and/or aggregated. A partially grouped and/or aggregated path
3743 * needs a FinalizeAggregate node to complete the aggregation. Currently,
3744 * the only partially grouped paths we build are also partial paths; that
3745 * is, they need a Gather and then a FinalizeAggregate.
3746 *
3747 * input_rel: contains the source-data Paths
3748 * target: the pathtarget for the result Paths to compute
3749 * gd: grouping sets data including list of grouping sets and their clauses
3750 *
3751 * Note: all Paths in input_rel are expected to return the target computed
3752 * by make_group_input_target.
3753 */
3754static RelOptInfo *
3756 RelOptInfo *input_rel,
3757 PathTarget *target,
3758 bool target_parallel_safe,
3760{
3761 Query *parse = root->parse;
3762 RelOptInfo *grouped_rel;
3763 RelOptInfo *partially_grouped_rel;
3764 AggClauseCosts agg_costs;
3765
3766 MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3768
3769 /*
3770 * Create grouping relation to hold fully aggregated grouping and/or
3771 * aggregation paths.
3772 */
3773 grouped_rel = make_grouping_rel(root, input_rel, target,
3774 target_parallel_safe, parse->havingQual);
3775
3776 /*
3777 * Create either paths for a degenerate grouping or paths for ordinary
3778 * grouping, as appropriate.
3779 */
3781 create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3782 else
3783 {
3784 int flags = 0;
3785 GroupPathExtraData extra;
3786
3787 /*
3788 * Determine whether it's possible to perform sort-based
3789 * implementations of grouping. (Note that if processed_groupClause
3790 * is empty, grouping_is_sortable() is trivially true, and all the
3791 * pathkeys_contained_in() tests will succeed too, so that we'll
3792 * consider every surviving input path.)
3793 *
3794 * If we have grouping sets, we might be able to sort some but not all
3795 * of them; in this case, we need can_sort to be true as long as we
3796 * must consider any sorted-input plan.
3797 */
3798 if ((gd && gd->rollups != NIL)
3799 || grouping_is_sortable(root->processed_groupClause))
3800 flags |= GROUPING_CAN_USE_SORT;
3801
3802 /*
3803 * Determine whether we should consider hash-based implementations of
3804 * grouping.
3805 *
3806 * Hashed aggregation only applies if we're grouping. If we have
3807 * grouping sets, some groups might be hashable but others not; in
3808 * this case we set can_hash true as long as there is nothing globally
3809 * preventing us from hashing (and we should therefore consider plans
3810 * with hashes).
3811 *
3812 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3813 * BY aggregates. (Doing so would imply storing *all* the input
3814 * values in the hash table, and/or running many sorts in parallel,
3815 * either of which seems like a certain loser.) We similarly don't
3816 * support ordered-set aggregates in hashed aggregation, but that case
3817 * is also included in the numOrderedAggs count.
3818 *
3819 * Note: grouping_is_hashable() is much more expensive to check than
3820 * the other gating conditions, so we want to do it last.
3821 */
3822 if ((parse->groupClause != NIL &&
3823 root->numOrderedAggs == 0 &&
3824 (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3825 flags |= GROUPING_CAN_USE_HASH;
3826
3827 /*
3828 * Determine whether partial aggregation is possible.
3829 */
3830 if (can_partial_agg(root))
3831 flags |= GROUPING_CAN_PARTIAL_AGG;
3832
3833 extra.flags = flags;
3834 extra.target_parallel_safe = target_parallel_safe;
3835 extra.havingQual = parse->havingQual;
3836 extra.targetList = parse->targetList;
3837 extra.partial_costs_set = false;
3838
3839 /*
3840 * Determine whether partitionwise aggregation is in theory possible.
3841 * It can be disabled by the user, and for now, we don't try to
3842 * support grouping sets. create_ordinary_grouping_paths() will check
3843 * additional conditions, such as whether input_rel is partitioned.
3844 */
3845 if (enable_partitionwise_aggregate && !parse->groupingSets)
3847 else
3849
3850 create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3851 &agg_costs, gd, &extra,
3852 &partially_grouped_rel);
3853 }
3854
3855 set_cheapest(grouped_rel);
3856 return grouped_rel;
3857}
3858
3859/*
3860 * make_grouping_rel
3861 *
3862 * Create a new grouping rel and set basic properties.
3863 *
3864 * input_rel represents the underlying scan/join relation.
3865 * target is the output expected from the grouping relation.
3866 */
3867static RelOptInfo *
3869 PathTarget *target, bool target_parallel_safe,
3870 Node *havingQual)
3871{
3872 RelOptInfo *grouped_rel;
3873
3874 if (IS_OTHER_REL(input_rel))
3875 {
3877 input_rel->relids);
3878 grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3879 }
3880 else
3881 {
3882 /*
3883 * By tradition, the relids set for the main grouping relation is
3884 * NULL. (This could be changed, but might require adjustments
3885 * elsewhere.)
3886 */
3887 grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3888 }
3889
3890 /* Set target. */
3891 grouped_rel->reltarget = target;
3892
3893 /*
3894 * If the input relation is not parallel-safe, then the grouped relation
3895 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3896 * target list and HAVING quals are parallel-safe.
3897 */
3898 if (input_rel->consider_parallel && target_parallel_safe &&
3899 is_parallel_safe(root, (Node *) havingQual))
3900 grouped_rel->consider_parallel = true;
3901
3902 /*
3903 * If the input rel belongs to a single FDW, so does the grouped rel.
3904 */
3905 grouped_rel->serverid = input_rel->serverid;
3906 grouped_rel->userid = input_rel->userid;
3907 grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3908 grouped_rel->fdwroutine = input_rel->fdwroutine;
3909
3910 return grouped_rel;
3911}
3912
3913/*
3914 * is_degenerate_grouping
3915 *
3916 * A degenerate grouping is one in which the query has a HAVING qual and/or
3917 * grouping sets, but no aggregates and no GROUP BY (which implies that the
3918 * grouping sets are all empty).
3919 */
3920static bool
3922{
3923 Query *parse = root->parse;
3924
3925 return (root->hasHavingQual || parse->groupingSets) &&
3926 !parse->hasAggs && parse->groupClause == NIL;
3927}
3928
3929/*
3930 * create_degenerate_grouping_paths
3931 *
3932 * When the grouping is degenerate (see is_degenerate_grouping), we are
3933 * supposed to emit either zero or one row for each grouping set depending on
3934 * whether HAVING succeeds. Furthermore, there cannot be any variables in
3935 * either HAVING or the targetlist, so we actually do not need the FROM table
3936 * at all! We can just throw away the plan-so-far and generate a Result node.
3937 * This is a sufficiently unusual corner case that it's not worth contorting
3938 * the structure of this module to avoid having to generate the earlier paths
3939 * in the first place.
3940 */
3941static void
3943 RelOptInfo *grouped_rel)
3944{
3945 Query *parse = root->parse;
3946 int nrows;
3947 Path *path;
3948
3949 nrows = list_length(parse->groupingSets);
3950 if (nrows > 1)
3951 {
3952 /*
3953 * Doesn't seem worthwhile writing code to cons up a generate_series
3954 * or a values scan to emit multiple rows. Instead just make N clones
3955 * and append them. (With a volatile HAVING clause, this means you
3956 * might get between 0 and N output rows. Offhand I think that's
3957 * desired.)
3958 */
3959 List *paths = NIL;
3960
3961 while (--nrows >= 0)
3962 {
3963 path = (Path *)
3964 create_group_result_path(root, grouped_rel,
3965 grouped_rel->reltarget,
3966 (List *) parse->havingQual);
3967 paths = lappend(paths, path);
3968 }
3969 path = (Path *)
3971 grouped_rel,
3972 paths,
3973 NIL,
3974 NIL,
3975 NULL,
3976 0,
3977 false,
3978 -1);
3979 }
3980 else
3981 {
3982 /* No grouping sets, or just one, so one output row */
3983 path = (Path *)
3984 create_group_result_path(root, grouped_rel,
3985 grouped_rel->reltarget,
3986 (List *) parse->havingQual);
3987 }
3988
3989 add_path(grouped_rel, path);
3990}
3991
3992/*
3993 * create_ordinary_grouping_paths
3994 *
3995 * Create grouping paths for the ordinary (that is, non-degenerate) case.
3996 *
3997 * We need to consider sorted and hashed aggregation in the same function,
3998 * because otherwise (1) it would be harder to throw an appropriate error
3999 * message if neither way works, and (2) we should not allow hashtable size
4000 * considerations to dissuade us from using hashing if sorting is not possible.
4001 *
4002 * *partially_grouped_rel_p will be set to the partially grouped rel which this
4003 * function creates, or to NULL if it doesn't create one.
4004 */
4005static void
4007 RelOptInfo *grouped_rel,
4008 const AggClauseCosts *agg_costs,
4010 GroupPathExtraData *extra,
4011 RelOptInfo **partially_grouped_rel_p)
4012{
4013 Path *cheapest_path = input_rel->cheapest_total_path;
4014 RelOptInfo *partially_grouped_rel = NULL;
4015 double dNumGroups;
4017
4018 /*
4019 * If this is the topmost grouping relation or if the parent relation is
4020 * doing some form of partitionwise aggregation, then we may be able to do
4021 * it at this level also. However, if the input relation is not
4022 * partitioned, partitionwise aggregate is impossible.
4023 */
4024 if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4025 IS_PARTITIONED_REL(input_rel))
4026 {
4027 /*
4028 * If this is the topmost relation or if the parent relation is doing
4029 * full partitionwise aggregation, then we can do full partitionwise
4030 * aggregation provided that the GROUP BY clause contains all of the
4031 * partitioning columns at this level and the collation used by GROUP
4032 * BY matches the partitioning collation. Otherwise, we can do at
4033 * most partial partitionwise aggregation. But if partial aggregation
4034 * is not supported in general then we can't use it for partitionwise
4035 * aggregation either.
4036 *
4037 * Check parse->groupClause not processed_groupClause, because it's
4038 * okay if some of the partitioning columns were proved redundant.
4039 */
4040 if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4041 group_by_has_partkey(input_rel, extra->targetList,
4042 root->parse->groupClause))
4044 else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4046 else
4048 }
4049
4050 /*
4051 * Before generating paths for grouped_rel, we first generate any possible
4052 * partially grouped paths; that way, later code can easily consider both
4053 * parallel and non-parallel approaches to grouping.
4054 */
4055 if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4056 {
4057 bool force_rel_creation;
4058
4059 /*
4060 * If we're doing partitionwise aggregation at this level, force
4061 * creation of a partially_grouped_rel so we can add partitionwise
4062 * paths to it.
4063 */
4064 force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4065
4066 partially_grouped_rel =
4068 grouped_rel,
4069 input_rel,
4070 gd,
4071 extra,
4072 force_rel_creation);
4073 }
4074
4075 /* Set out parameter. */
4076 *partially_grouped_rel_p = partially_grouped_rel;
4077
4078 /* Apply partitionwise aggregation technique, if possible. */
4079 if (patype != PARTITIONWISE_AGGREGATE_NONE)
4080 create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4081 partially_grouped_rel, agg_costs,
4082 gd, patype, extra);
4083
4084 /* If we are doing partial aggregation only, return. */
4086 {
4087 Assert(partially_grouped_rel);
4088
4089 if (partially_grouped_rel->pathlist)
4090 set_cheapest(partially_grouped_rel);
4091
4092 return;
4093 }
4094
4095 /* Gather any partially grouped partial paths. */
4096 if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4097 {
4098 gather_grouping_paths(root, partially_grouped_rel);
4099 set_cheapest(partially_grouped_rel);
4100 }
4101
4102 /*
4103 * Estimate number of groups.
4104 */
4105 dNumGroups = get_number_of_groups(root,
4106 cheapest_path->rows,
4107 gd,
4108 extra->targetList);
4109
4110 /* Build final grouping paths */
4111 add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4112 partially_grouped_rel, agg_costs, gd,
4113 dNumGroups, extra);
4114
4115 /* Give a helpful error if we failed to find any implementation */
4116 if (grouped_rel->pathlist == NIL)
4117 ereport(ERROR,
4118 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4119 errmsg("could not implement GROUP BY"),
4120 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4121
4122 /*
4123 * If there is an FDW that's responsible for all baserels of the query,
4124 * let it consider adding ForeignPaths.
4125 */
4126 if (grouped_rel->fdwroutine &&
4127 grouped_rel->fdwroutine->GetForeignUpperPaths)
4128 grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4129 input_rel, grouped_rel,
4130 extra);
4131
4132 /* Let extensions possibly add some more paths */
4134 (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4135 input_rel, grouped_rel,
4136 extra);
4137}
4138
4139/*
4140 * For a given input path, consider the possible ways of doing grouping sets on
4141 * it, by combinations of hashing and sorting. This can be called multiple
4142 * times, so it's important that it not scribble on input. No result is
4143 * returned, but any generated paths are added to grouped_rel.
4144 */
4145static void
4147 RelOptInfo *grouped_rel,
4148 Path *path,
4149 bool is_sorted,
4150 bool can_hash,
4152 const AggClauseCosts *agg_costs,
4153 double dNumGroups)
4154{
4155 Query *parse = root->parse;
4156 Size hash_mem_limit = get_hash_memory_limit();
4157
4158 /*
4159 * If we're not being offered sorted input, then only consider plans that
4160 * can be done entirely by hashing.
4161 *
4162 * We can hash everything if it looks like it'll fit in hash_mem. But if
4163 * the input is actually sorted despite not being advertised as such, we
4164 * prefer to make use of that in order to use less memory.
4165 *
4166 * If none of the grouping sets are sortable, then ignore the hash_mem
4167 * limit and generate a path anyway, since otherwise we'll just fail.
4168 */
4169 if (!is_sorted)
4170 {
4171 List *new_rollups = NIL;
4172 RollupData *unhashed_rollup = NULL;
4173 List *sets_data;
4174 List *empty_sets_data = NIL;
4175 List *empty_sets = NIL;
4176 ListCell *lc;
4177 ListCell *l_start = list_head(gd->rollups);
4178 AggStrategy strat = AGG_HASHED;
4179 double hashsize;
4180 double exclude_groups = 0.0;
4181
4182 Assert(can_hash);
4183
4184 /*
4185 * If the input is coincidentally sorted usefully (which can happen
4186 * even if is_sorted is false, since that only means that our caller
4187 * has set up the sorting for us), then save some hashtable space by
4188 * making use of that. But we need to watch out for degenerate cases:
4189 *
4190 * 1) If there are any empty grouping sets, then group_pathkeys might
4191 * be NIL if all non-empty grouping sets are unsortable. In this case,
4192 * there will be a rollup containing only empty groups, and the
4193 * pathkeys_contained_in test is vacuously true; this is ok.
4194 *
4195 * XXX: the above relies on the fact that group_pathkeys is generated
4196 * from the first rollup. If we add the ability to consider multiple
4197 * sort orders for grouping input, this assumption might fail.
4198 *
4199 * 2) If there are no empty sets and only unsortable sets, then the
4200 * rollups list will be empty (and thus l_start == NULL), and
4201 * group_pathkeys will be NIL; we must ensure that the vacuously-true
4202 * pathkeys_contained_in test doesn't cause us to crash.
4203 */
4204 if (l_start != NULL &&
4205 pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4206 {
4207 unhashed_rollup = lfirst_node(RollupData, l_start);
4208 exclude_groups = unhashed_rollup->numGroups;
4209 l_start = lnext(gd->rollups, l_start);
4210 }
4211
4213 path,
4214 agg_costs,
4215 dNumGroups - exclude_groups);
4216
4217 /*
4218 * gd->rollups is empty if we have only unsortable columns to work
4219 * with. Override hash_mem in that case; otherwise, we'll rely on the
4220 * sorted-input case to generate usable mixed paths.
4221 */
4222 if (hashsize > hash_mem_limit && gd->rollups)
4223 return; /* nope, won't fit */
4224
4225 /*
4226 * We need to burst the existing rollups list into individual grouping
4227 * sets and recompute a groupClause for each set.
4228 */
4229 sets_data = list_copy(gd->unsortable_sets);
4230
4231 for_each_cell(lc, gd->rollups, l_start)
4232 {
4233 RollupData *rollup = lfirst_node(RollupData, lc);
4234
4235 /*
4236 * If we find an unhashable rollup that's not been skipped by the
4237 * "actually sorted" check above, we can't cope; we'd need sorted
4238 * input (with a different sort order) but we can't get that here.
4239 * So bail out; we'll get a valid path from the is_sorted case
4240 * instead.
4241 *
4242 * The mere presence of empty grouping sets doesn't make a rollup
4243 * unhashable (see preprocess_grouping_sets), we handle those
4244 * specially below.
4245 */
4246 if (!rollup->hashable)
4247 return;
4248
4249 sets_data = list_concat(sets_data, rollup->gsets_data);
4250 }
4251 foreach(lc, sets_data)
4252 {
4254 List *gset = gs->set;
4255 RollupData *rollup;
4256
4257 if (gset == NIL)
4258 {
4259 /* Empty grouping sets can't be hashed. */
4260 empty_sets_data = lappend(empty_sets_data, gs);
4261 empty_sets = lappend(empty_sets, NIL);
4262 }
4263 else
4264 {
4265 rollup = makeNode(RollupData);
4266
4267 rollup->groupClause = preprocess_groupclause(root, gset);
4268 rollup->gsets_data = list_make1(gs);
4269 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4270 rollup->gsets_data,
4272 rollup->numGroups = gs->numGroups;
4273 rollup->hashable = true;
4274 rollup->is_hashed = true;
4275 new_rollups = lappend(new_rollups, rollup);
4276 }
4277 }
4278
4279 /*
4280 * If we didn't find anything nonempty to hash, then bail. We'll
4281 * generate a path from the is_sorted case.
4282 */
4283 if (new_rollups == NIL)
4284 return;
4285
4286 /*
4287 * If there were empty grouping sets they should have been in the
4288 * first rollup.
4289 */
4290 Assert(!unhashed_rollup || !empty_sets);
4291
4292 if (unhashed_rollup)
4293 {
4294 new_rollups = lappend(new_rollups, unhashed_rollup);
4295 strat = AGG_MIXED;
4296 }
4297 else if (empty_sets)
4298 {
4299 RollupData *rollup = makeNode(RollupData);
4300
4301 rollup->groupClause = NIL;
4302 rollup->gsets_data = empty_sets_data;
4303 rollup->gsets = empty_sets;
4304 rollup->numGroups = list_length(empty_sets);
4305 rollup->hashable = false;
4306 rollup->is_hashed = false;
4307 new_rollups = lappend(new_rollups, rollup);
4308 strat = AGG_MIXED;
4309 }
4310
4311 add_path(grouped_rel, (Path *)
4313 grouped_rel,
4314 path,
4315 (List *) parse->havingQual,
4316 strat,
4317 new_rollups,
4318 agg_costs));
4319 return;
4320 }
4321
4322 /*
4323 * If we have sorted input but nothing we can do with it, bail.
4324 */
4325 if (gd->rollups == NIL)
4326 return;
4327
4328 /*
4329 * Given sorted input, we try and make two paths: one sorted and one mixed
4330 * sort/hash. (We need to try both because hashagg might be disabled, or
4331 * some columns might not be sortable.)
4332 *
4333 * can_hash is passed in as false if some obstacle elsewhere (such as
4334 * ordered aggs) means that we shouldn't consider hashing at all.
4335 */
4336 if (can_hash && gd->any_hashable)
4337 {
4338 List *rollups = NIL;
4339 List *hash_sets = list_copy(gd->unsortable_sets);
4340 double availspace = hash_mem_limit;
4341 ListCell *lc;
4342
4343 /*
4344 * Account first for space needed for groups we can't sort at all.
4345 */
4346 availspace -= estimate_hashagg_tablesize(root,
4347 path,
4348 agg_costs,
4349 gd->dNumHashGroups);
4350
4351 if (availspace > 0 && list_length(gd->rollups) > 1)
4352 {
4353 double scale;
4354 int num_rollups = list_length(gd->rollups);
4355 int k_capacity;
4356 int *k_weights = palloc(num_rollups * sizeof(int));
4357 Bitmapset *hash_items = NULL;
4358 int i;
4359
4360 /*
4361 * We treat this as a knapsack problem: the knapsack capacity
4362 * represents hash_mem, the item weights are the estimated memory
4363 * usage of the hashtables needed to implement a single rollup,
4364 * and we really ought to use the cost saving as the item value;
4365 * however, currently the costs assigned to sort nodes don't
4366 * reflect the comparison costs well, and so we treat all items as
4367 * of equal value (each rollup we hash instead saves us one sort).
4368 *
4369 * To use the discrete knapsack, we need to scale the values to a
4370 * reasonably small bounded range. We choose to allow a 5% error
4371 * margin; we have no more than 4096 rollups in the worst possible
4372 * case, which with a 5% error margin will require a bit over 42MB
4373 * of workspace. (Anyone wanting to plan queries that complex had
4374 * better have the memory for it. In more reasonable cases, with
4375 * no more than a couple of dozen rollups, the memory usage will
4376 * be negligible.)
4377 *
4378 * k_capacity is naturally bounded, but we clamp the values for
4379 * scale and weight (below) to avoid overflows or underflows (or
4380 * uselessly trying to use a scale factor less than 1 byte).
4381 */
4382 scale = Max(availspace / (20.0 * num_rollups), 1.0);
4383 k_capacity = (int) floor(availspace / scale);
4384
4385 /*
4386 * We leave the first rollup out of consideration since it's the
4387 * one that matches the input sort order. We assign indexes "i"
4388 * to only those entries considered for hashing; the second loop,
4389 * below, must use the same condition.
4390 */
4391 i = 0;
4392 for_each_from(lc, gd->rollups, 1)
4393 {
4394 RollupData *rollup = lfirst_node(RollupData, lc);
4395
4396 if (rollup->hashable)
4397 {
4398 double sz = estimate_hashagg_tablesize(root,
4399 path,
4400 agg_costs,
4401 rollup->numGroups);
4402
4403 /*
4404 * If sz is enormous, but hash_mem (and hence scale) is
4405 * small, avoid integer overflow here.
4406 */
4407 k_weights[i] = (int) Min(floor(sz / scale),
4408 k_capacity + 1.0);
4409 ++i;
4410 }
4411 }
4412
4413 /*
4414 * Apply knapsack algorithm; compute the set of items which
4415 * maximizes the value stored (in this case the number of sorts
4416 * saved) while keeping the total size (approximately) within
4417 * capacity.
4418 */
4419 if (i > 0)
4420 hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4421
4422 if (!bms_is_empty(hash_items))
4423 {
4424 rollups = list_make1(linitial(gd->rollups));
4425
4426 i = 0;
4427 for_each_from(lc, gd->rollups, 1)
4428 {
4429 RollupData *rollup = lfirst_node(RollupData, lc);
4430
4431 if (rollup->hashable)
4432 {
4433 if (bms_is_member(i, hash_items))
4434 hash_sets = list_concat(hash_sets,
4435 rollup->gsets_data);
4436 else
4437 rollups = lappend(rollups, rollup);
4438 ++i;
4439 }
4440 else
4441 rollups = lappend(rollups, rollup);
4442 }
4443 }
4444 }
4445
4446 if (!rollups && hash_sets)
4447 rollups = list_copy(gd->rollups);
4448
4449 foreach(lc, hash_sets)
4450 {
4452 RollupData *rollup = makeNode(RollupData);
4453
4454 Assert(gs->set != NIL);
4455
4457 rollup->gsets_data = list_make1(gs);
4458 rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4459 rollup->gsets_data,
4461 rollup->numGroups = gs->numGroups;
4462 rollup->hashable = true;
4463 rollup->is_hashed = true;
4464 rollups = lcons(rollup, rollups);
4465 }
4466
4467 if (rollups)
4468 {
4469 add_path(grouped_rel, (Path *)
4471 grouped_rel,
4472 path,
4473 (List *) parse->havingQual,
4474 AGG_MIXED,
4475 rollups,
4476 agg_costs));
4477 }
4478 }
4479
4480 /*
4481 * Now try the simple sorted case.
4482 */
4483 if (!gd->unsortable_sets)
4484 add_path(grouped_rel, (Path *)
4486 grouped_rel,
4487 path,
4488 (List *) parse->havingQual,
4489 AGG_SORTED,
4490 gd->rollups,
4491 agg_costs));
4492}
4493
4494/*
4495 * create_window_paths
4496 *
4497 * Build a new upperrel containing Paths for window-function evaluation.
4498 *
4499 * input_rel: contains the source-data Paths
4500 * input_target: result of make_window_input_target
4501 * output_target: what the topmost WindowAggPath should return
4502 * wflists: result of find_window_functions
4503 * activeWindows: result of select_active_windows
4504 *
4505 * Note: all Paths in input_rel are expected to return input_target.
4506 */
4507static RelOptInfo *
4509 RelOptInfo *input_rel,
4510 PathTarget *input_target,
4511 PathTarget *output_target,
4512 bool output_target_parallel_safe,
4513 WindowFuncLists *wflists,
4514 List *activeWindows)
4515{
4516 RelOptInfo *window_rel;
4517 ListCell *lc;
4518
4519 /* For now, do all work in the (WINDOW, NULL) upperrel */
4520 window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4521
4522 /*
4523 * If the input relation is not parallel-safe, then the window relation
4524 * can't be parallel-safe, either. Otherwise, we need to examine the
4525 * target list and active windows for non-parallel-safe constructs.
4526 */
4527 if (input_rel->consider_parallel && output_target_parallel_safe &&
4528 is_parallel_safe(root, (Node *) activeWindows))
4529 window_rel->consider_parallel = true;
4530
4531 /*
4532 * If the input rel belongs to a single FDW, so does the window rel.
4533 */
4534 window_rel->serverid = input_rel->serverid;
4535 window_rel->userid = input_rel->userid;
4536 window_rel->useridiscurrent = input_rel->useridiscurrent;
4537 window_rel->fdwroutine = input_rel->fdwroutine;
4538
4539 /*
4540 * Consider computing window functions starting from the existing
4541 * cheapest-total path (which will likely require a sort) as well as any
4542 * existing paths that satisfy or partially satisfy root->window_pathkeys.
4543 */
4544 foreach(lc, input_rel->pathlist)
4545 {
4546 Path *path = (Path *) lfirst(lc);
4547 int presorted_keys;
4548
4549 if (path == input_rel->cheapest_total_path ||
4550 pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4551 &presorted_keys) ||
4552 presorted_keys > 0)
4554 window_rel,
4555 path,
4556 input_target,
4557 output_target,
4558 wflists,
4559 activeWindows);
4560 }
4561
4562 /*
4563 * If there is an FDW that's responsible for all baserels of the query,
4564 * let it consider adding ForeignPaths.
4565 */
4566 if (window_rel->fdwroutine &&
4567 window_rel->fdwroutine->GetForeignUpperPaths)
4568 window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4569 input_rel, window_rel,
4570 NULL);
4571
4572 /* Let extensions possibly add some more paths */
4574 (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4575 input_rel, window_rel, NULL);
4576
4577 /* Now choose the best path(s) */
4578 set_cheapest(window_rel);
4579
4580 return window_rel;
4581}
4582
4583/*
4584 * Stack window-function implementation steps atop the given Path, and
4585 * add the result to window_rel.
4586 *
4587 * window_rel: upperrel to contain result
4588 * path: input Path to use (must return input_target)
4589 * input_target: result of make_window_input_target
4590 * output_target: what the topmost WindowAggPath should return
4591 * wflists: result of find_window_functions
4592 * activeWindows: result of select_active_windows
4593 */
4594static void
4596 RelOptInfo *window_rel,
4597 Path *path,
4598 PathTarget *input_target,
4599 PathTarget *output_target,
4600 WindowFuncLists *wflists,
4601 List *activeWindows)
4602{
4603 PathTarget *window_target;
4604 ListCell *l;
4605 List *topqual = NIL;
4606
4607 /*
4608 * Since each window clause could require a different sort order, we stack
4609 * up a WindowAgg node for each clause, with sort steps between them as
4610 * needed. (We assume that select_active_windows chose a good order for
4611 * executing the clauses in.)
4612 *
4613 * input_target should contain all Vars and Aggs needed for the result.
4614 * (In some cases we wouldn't need to propagate all of these all the way
4615 * to the top, since they might only be needed as inputs to WindowFuncs.
4616 * It's probably not worth trying to optimize that though.) It must also
4617 * contain all window partitioning and sorting expressions, to ensure
4618 * they're computed only once at the bottom of the stack (that's critical
4619 * for volatile functions). As we climb up the stack, we'll add outputs
4620 * for the WindowFuncs computed at each level.
4621 */
4622 window_target = input_target;
4623
4624 foreach(l, activeWindows)
4625 {
4627 List *window_pathkeys;
4628 List *runcondition = NIL;
4629 int presorted_keys;
4630 bool is_sorted;
4631 bool topwindow;
4632 ListCell *lc2;
4633
4634 window_pathkeys = make_pathkeys_for_window(root,
4635 wc,
4636 root->processed_tlist);
4637
4638 is_sorted = pathkeys_count_contained_in(window_pathkeys,
4639 path->pathkeys,
4640 &presorted_keys);
4641
4642 /* Sort if necessary */
4643 if (!is_sorted)
4644 {
4645 /*
4646 * No presorted keys or incremental sort disabled, just perform a
4647 * complete sort.
4648 */
4649 if (presorted_keys == 0 || !enable_incremental_sort)
4650 path = (Path *) create_sort_path(root, window_rel,
4651 path,
4652 window_pathkeys,
4653 -1.0);
4654 else
4655 {
4656 /*
4657 * Since we have presorted keys and incremental sort is
4658 * enabled, just use incremental sort.
4659 */
4661 window_rel,
4662 path,
4663 window_pathkeys,
4664 presorted_keys,
4665 -1.0);
4666 }
4667 }
4668
4669 if (lnext(activeWindows, l))
4670 {
4671 /*
4672 * Add the current WindowFuncs to the output target for this
4673 * intermediate WindowAggPath. We must copy window_target to
4674 * avoid changing the previous path's target.
4675 *
4676 * Note: a WindowFunc adds nothing to the target's eval costs; but
4677 * we do need to account for the increase in tlist width.
4678 */
4679 int64 tuple_width = window_target->width;
4680
4681 window_target = copy_pathtarget(window_target);
4682 foreach(lc2, wflists->windowFuncs[wc->winref])
4683 {
4684 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4685
4686 add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4687 tuple_width += get_typavgwidth(wfunc->wintype, -1);
4688 }
4689 window_target->width = clamp_width_est(tuple_width);
4690 }
4691 else
4692 {
4693 /* Install the goal target in the topmost WindowAgg */
4694 window_target = output_target;
4695 }
4696
4697 /* mark the final item in the list as the top-level window */
4698 topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4699
4700 /*
4701 * Collect the WindowFuncRunConditions from each WindowFunc and
4702 * convert them into OpExprs
4703 */
4704 foreach(lc2, wflists->windowFuncs[wc->winref])
4705 {
4706 ListCell *lc3;
4707 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4708
4709 foreach(lc3, wfunc->runCondition)
4710 {
4711 WindowFuncRunCondition *wfuncrc =
4713 Expr *opexpr;
4714 Expr *leftop;
4715 Expr *rightop;
4716
4717 if (wfuncrc->wfunc_left)
4718 {
4719 leftop = (Expr *) copyObject(wfunc);
4720 rightop = copyObject(wfuncrc->arg);
4721 }
4722 else
4723 {
4724 leftop = copyObject(wfuncrc->arg);
4725 rightop = (Expr *) copyObject(wfunc);
4726 }
4727
4728 opexpr = make_opclause(wfuncrc->opno,
4729 BOOLOID,
4730 false,
4731 leftop,
4732 rightop,
4733 InvalidOid,
4734 wfuncrc->inputcollid);
4735
4736 runcondition = lappend(runcondition, opexpr);
4737
4738 if (!topwindow)
4739 topqual = lappend(topqual, opexpr);
4740 }
4741 }
4742
4743 path = (Path *)
4744 create_windowagg_path(root, window_rel, path, window_target,
4745 wflists->windowFuncs[wc->winref],
4746 runcondition, wc,
4747 topwindow ? topqual : NIL, topwindow);
4748 }
4749
4750 add_path(window_rel, path);
4751}
4752
4753/*
4754 * create_distinct_paths
4755 *
4756 * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4757 *
4758 * input_rel: contains the source-data Paths
4759 * target: the pathtarget for the result Paths to compute
4760 *
4761 * Note: input paths should already compute the desired pathtarget, since
4762 * Sort/Unique won't project anything.
4763 */
4764static RelOptInfo *
4766 PathTarget *target)
4767{
4768 RelOptInfo *distinct_rel;
4769
4770 /* For now, do all work in the (DISTINCT, NULL) upperrel */
4771 distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4772
4773 /*
4774 * We don't compute anything at this level, so distinct_rel will be
4775 * parallel-safe if the input rel is parallel-safe. In particular, if
4776 * there is a DISTINCT ON (...) clause, any path for the input_rel will
4777 * output those expressions, and will not be parallel-safe unless those
4778 * expressions are parallel-safe.
4779 */
4780 distinct_rel->consider_parallel = input_rel->consider_parallel;
4781
4782 /*
4783 * If the input rel belongs to a single FDW, so does the distinct_rel.
4784 */
4785 distinct_rel->serverid = input_rel->serverid;
4786 distinct_rel->userid = input_rel->userid;
4787 distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4788 distinct_rel->fdwroutine = input_rel->fdwroutine;
4789
4790 /* build distinct paths based on input_rel's pathlist */
4791 create_final_distinct_paths(root, input_rel, distinct_rel);
4792
4793 /* now build distinct paths based on input_rel's partial_pathlist */
4794 create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4795
4796 /* Give a helpful error if we failed to create any paths */
4797 if (distinct_rel->pathlist == NIL)
4798 ereport(ERROR,
4799 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4800 errmsg("could not implement DISTINCT"),
4801 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4802
4803 /*
4804 * If there is an FDW that's responsible for all baserels of the query,
4805 * let it consider adding ForeignPaths.
4806 */
4807 if (distinct_rel->fdwroutine &&
4808 distinct_rel->fdwroutine->GetForeignUpperPaths)
4809 distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4811 input_rel,
4812 distinct_rel,
4813 NULL);
4814
4815 /* Let extensions possibly add some more paths */
4817 (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4818 distinct_rel, NULL);
4819
4820 /* Now choose the best path(s) */
4821 set_cheapest(distinct_rel);
4822
4823 return distinct_rel;
4824}
4825
4826/*
4827 * create_partial_distinct_paths
4828 *
4829 * Process 'input_rel' partial paths and add unique/aggregate paths to the
4830 * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4831 * paths on top and add a final unique/aggregate path to remove any duplicate
4832 * produced from combining rows from parallel workers.
4833 */
4834static void
4836 RelOptInfo *final_distinct_rel,
4837 PathTarget *target)
4838{
4839 RelOptInfo *partial_distinct_rel;
4840 Query *parse;
4841 List *distinctExprs;
4842 double numDistinctRows;
4843 Path *cheapest_partial_path;
4844 ListCell *lc;
4845
4846 /* nothing to do when there are no partial paths in the input rel */
4847 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4848 return;
4849
4850 parse = root->parse;
4851
4852 /* can't do parallel DISTINCT ON */
4853 if (parse->hasDistinctOn)
4854 return;
4855
4856 partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4857 NULL);
4858 partial_distinct_rel->reltarget = target;
4859 partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4860
4861 /*
4862 * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4863 */
4864 partial_distinct_rel->serverid = input_rel->serverid;
4865 partial_distinct_rel->userid = input_rel->userid;
4866 partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4867 partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4868
4869 cheapest_partial_path = linitial(input_rel->partial_pathlist);
4870
4871 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4872 parse->targetList);
4873
4874 /* estimate how many distinct rows we'll get from each worker */
4875 numDistinctRows = estimate_num_groups(root, distinctExprs,
4876 cheapest_partial_path->rows,
4877 NULL, NULL);
4878
4879 /*
4880 * Try sorting the cheapest path and incrementally sorting any paths with
4881 * presorted keys and put a unique paths atop of those. We'll also
4882 * attempt to reorder the required pathkeys to match the input path's
4883 * pathkeys as much as possible, in hopes of avoiding a possible need to
4884 * re-sort.
4885 */
4886 if (grouping_is_sortable(root->processed_distinctClause))
4887 {
4888 foreach(lc, input_rel->partial_pathlist)
4889 {
4890 Path *input_path = (Path *) lfirst(lc);
4891 Path *sorted_path;
4892 List *useful_pathkeys_list = NIL;
4893
4894 useful_pathkeys_list =
4896 root->distinct_pathkeys,
4897 input_path->pathkeys);
4898 Assert(list_length(useful_pathkeys_list) > 0);
4899
4900 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4901 {
4902 sorted_path = make_ordered_path(root,
4903 partial_distinct_rel,
4904 input_path,
4905 cheapest_partial_path,
4906 useful_pathkeys,
4907 -1.0);
4908
4909 if (sorted_path == NULL)
4910 continue;
4911
4912 /*
4913 * An empty distinct_pathkeys means all tuples have the same
4914 * value for the DISTINCT clause. See
4915 * create_final_distinct_paths()
4916 */
4917 if (root->distinct_pathkeys == NIL)
4918 {
4919 Node *limitCount;
4920
4921 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4922 sizeof(int64),
4923 Int64GetDatum(1), false,
4924 true);
4925
4926 /*
4927 * Apply a LimitPath onto the partial path to restrict the
4928 * tuples from each worker to 1.
4929 * create_final_distinct_paths will need to apply an
4930 * additional LimitPath to restrict this to a single row
4931 * after the Gather node. If the query already has a
4932 * LIMIT clause, then we could end up with three Limit
4933 * nodes in the final plan. Consolidating the top two of
4934 * these could be done, but does not seem worth troubling
4935 * over.
4936 */
4937 add_partial_path(partial_distinct_rel, (Path *)
4938 create_limit_path(root, partial_distinct_rel,
4939 sorted_path,
4940 NULL,
4941 limitCount,
4943 0, 1));
4944 }
4945 else
4946 {
4947 add_partial_path(partial_distinct_rel, (Path *)
4948 create_unique_path(root, partial_distinct_rel,
4949 sorted_path,
4950 list_length(root->distinct_pathkeys),
4951 numDistinctRows));
4952 }
4953 }
4954 }
4955 }
4956
4957 /*
4958 * Now try hash aggregate paths, if enabled and hashing is possible. Since
4959 * we're not on the hook to ensure we do our best to create at least one
4960 * path here, we treat enable_hashagg as a hard off-switch rather than the
4961 * slightly softer variant in create_final_distinct_paths.
4962 */
4963 if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4964 {
4965 add_partial_path(partial_distinct_rel, (Path *)
4967 partial_distinct_rel,
4968 cheapest_partial_path,
4969 cheapest_partial_path->pathtarget,
4970 AGG_HASHED,
4972 root->processed_distinctClause,
4973 NIL,
4974 NULL,
4975 numDistinctRows));
4976 }
4977
4978 /*
4979 * If there is an FDW that's responsible for all baserels of the query,
4980 * let it consider adding ForeignPaths.
4981 */
4982 if (partial_distinct_rel->fdwroutine &&
4983 partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4984 partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4986 input_rel,
4987 partial_distinct_rel,
4988 NULL);
4989
4990 /* Let extensions possibly add some more partial paths */
4992 (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4993 input_rel, partial_distinct_rel, NULL);
4994
4995 if (partial_distinct_rel->partial_pathlist != NIL)
4996 {
4997 generate_useful_gather_paths(root, partial_distinct_rel, true);
4998 set_cheapest(partial_distinct_rel);
4999
5000 /*
5001 * Finally, create paths to distinctify the final result. This step
5002 * is needed to remove any duplicates due to combining rows from
5003 * parallel workers.
5004 */
5005 create_final_distinct_paths(root, partial_distinct_rel,
5006 final_distinct_rel);
5007 }
5008}
5009
5010/*
5011 * create_final_distinct_paths
5012 * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
5013 *
5014 * input_rel: contains the source-data paths
5015 * distinct_rel: destination relation for storing created paths
5016 */
5017static RelOptInfo *
5019 RelOptInfo *distinct_rel)
5020{
5021 Query *parse = root->parse;
5022 Path *cheapest_input_path = input_rel->cheapest_total_path;
5023 double numDistinctRows;
5024 bool allow_hash;
5025
5026 /* Estimate number of distinct rows there will be */
5027 if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5028 root->hasHavingQual)
5029 {
5030 /*
5031 * If there was grouping or aggregation, use the number of input rows
5032 * as the estimated number of DISTINCT rows (ie, assume the input is
5033 * already mostly unique).
5034 */
5035 numDistinctRows = cheapest_input_path->rows;
5036 }
5037 else
5038 {
5039 /*
5040 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5041 */
5042 List *distinctExprs;
5043
5044 distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5045 parse->targetList);
5046 numDistinctRows = estimate_num_groups(root, distinctExprs,
5047 cheapest_input_path->rows,
5048 NULL, NULL);
5049 }
5050
5051 /*
5052 * Consider sort-based implementations of DISTINCT, if possible.
5053 */
5054 if (grouping_is_sortable(root->processed_distinctClause))
5055 {
5056 /*
5057 * Firstly, if we have any adequately-presorted paths, just stick a
5058 * Unique node on those. We also, consider doing an explicit sort of
5059 * the cheapest input path and Unique'ing that. If any paths have
5060 * presorted keys then we'll create an incremental sort atop of those
5061 * before adding a unique node on the top. We'll also attempt to
5062 * reorder the required pathkeys to match the input path's pathkeys as
5063 * much as possible, in hopes of avoiding a possible need to re-sort.
5064 *
5065 * When we have DISTINCT ON, we must sort by the more rigorous of
5066 * DISTINCT and ORDER BY, else it won't have the desired behavior.
5067 * Also, if we do have to do an explicit sort, we might as well use
5068 * the more rigorous ordering to avoid a second sort later. (Note
5069 * that the parser will have ensured that one clause is a prefix of
5070 * the other.)
5071 */
5072 List *needed_pathkeys;
5073 ListCell *lc;
5074 double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5075
5076 if (parse->hasDistinctOn &&
5077 list_length(root->distinct_pathkeys) <
5078 list_length(root->sort_pathkeys))
5079 needed_pathkeys = root->sort_pathkeys;
5080 else
5081 needed_pathkeys = root->distinct_pathkeys;
5082
5083 foreach(lc, input_rel->pathlist)
5084 {
5085 Path *input_path = (Path *) lfirst(lc);
5086 Path *sorted_path;
5087 List *useful_pathkeys_list = NIL;
5088
5089 useful_pathkeys_list =
5091 needed_pathkeys,
5092 input_path->pathkeys);
5093 Assert(list_length(useful_pathkeys_list) > 0);
5094
5095 foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5096 {
5097 sorted_path = make_ordered_path(root,
5098 distinct_rel,
5099 input_path,
5100 cheapest_input_path,
5101 useful_pathkeys,
5102 limittuples);
5103
5104 if (sorted_path == NULL)
5105 continue;
5106
5107 /*
5108 * distinct_pathkeys may have become empty if all of the
5109 * pathkeys were determined to be redundant. If all of the
5110 * pathkeys are redundant then each DISTINCT target must only
5111 * allow a single value, therefore all resulting tuples must
5112 * be identical (or at least indistinguishable by an equality
5113 * check). We can uniquify these tuples simply by just taking
5114 * the first tuple. All we do here is add a path to do "LIMIT
5115 * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5116 * still have a non-NIL sort_pathkeys list, so we must still
5117 * only do this with paths which are correctly sorted by
5118 * sort_pathkeys.
5119 */
5120 if (root->distinct_pathkeys == NIL)
5121 {
5122 Node *limitCount;
5123
5124 limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5125 sizeof(int64),
5126 Int64GetDatum(1), false,
5127 true);
5128
5129 /*
5130 * If the query already has a LIMIT clause, then we could
5131 * end up with a duplicate LimitPath in the final plan.
5132 * That does not seem worth troubling over too much.
5133 */
5134 add_path(distinct_rel, (Path *)
5135 create_limit_path(root, distinct_rel, sorted_path,
5136 NULL, limitCount,
5137 LIMIT_OPTION_COUNT, 0, 1));
5138 }
5139 else
5140 {
5141 add_path(distinct_rel, (Path *)
5142 create_unique_path(root, distinct_rel,
5143 sorted_path,
5144 list_length(root->distinct_pathkeys),
5145 numDistinctRows));
5146 }
5147 }
5148 }
5149 }
5150
5151 /*
5152 * Consider hash-based implementations of DISTINCT, if possible.
5153 *
5154 * If we were not able to make any other types of path, we *must* hash or
5155 * die trying. If we do have other choices, there are two things that
5156 * should prevent selection of hashing: if the query uses DISTINCT ON
5157 * (because it won't really have the expected behavior if we hash), or if
5158 * enable_hashagg is off.
5159 *
5160 * Note: grouping_is_hashable() is much more expensive to check than the
5161 * other gating conditions, so we want to do it last.
5162 */
5163 if (distinct_rel->pathlist == NIL)
5164 allow_hash = true; /* we have no alternatives */
5165 else if (parse->hasDistinctOn || !enable_hashagg)
5166 allow_hash = false; /* policy-based decision not to hash */
5167 else
5168 allow_hash = true; /* default */
5169
5170 if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5171 {
5172 /* Generate hashed aggregate path --- no sort needed */
5173 add_path(distinct_rel, (Path *)
5175 distinct_rel,
5176 cheapest_input_path,
5177 cheapest_input_path->pathtarget,
5178 AGG_HASHED,
5180 root->processed_distinctClause,
5181 NIL,
5182 NULL,
5183 numDistinctRows));
5184 }
5185
5186 return distinct_rel;
5187}
5188
5189/*
5190 * get_useful_pathkeys_for_distinct
5191 * Get useful orderings of pathkeys for distinctClause by reordering
5192 * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5193 *
5194 * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5195 * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5196 */
5197static List *
5199 List *path_pathkeys)
5200{
5201 List *useful_pathkeys_list = NIL;
5202 List *useful_pathkeys = NIL;
5203
5204 /* always include the given 'needed_pathkeys' */
5205 useful_pathkeys_list = lappend(useful_pathkeys_list,
5206 needed_pathkeys);
5207
5209 return useful_pathkeys_list;
5210
5211 /*
5212 * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5213 * that match 'needed_pathkeys', but only up to the longest matching
5214 * prefix.
5215 *
5216 * When we have DISTINCT ON, we must ensure that the resulting pathkey
5217 * list matches initial distinctClause pathkeys; otherwise, it won't have
5218 * the desired behavior.
5219 */
5220 foreach_node(PathKey, pathkey, path_pathkeys)
5221 {
5222 /*
5223 * The PathKey nodes are canonical, so they can be checked for
5224 * equality by simple pointer comparison.
5225 */
5226 if (!list_member_ptr(needed_pathkeys, pathkey))
5227 break;
5228 if (root->parse->hasDistinctOn &&
5229 !list_member_ptr(root->distinct_pathkeys, pathkey))
5230 break;
5231
5232 useful_pathkeys = lappend(useful_pathkeys, pathkey);
5233 }
5234
5235 /* If no match at all, no point in reordering needed_pathkeys */
5236 if (useful_pathkeys == NIL)
5237 return useful_pathkeys_list;
5238
5239 /*
5240 * If not full match, the resulting pathkey list is not useful without
5241 * incremental sort.
5242 */
5243 if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5245 return useful_pathkeys_list;
5246
5247 /* Append the remaining PathKey nodes in needed_pathkeys */
5248 useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5249 needed_pathkeys);
5250
5251 /*
5252 * If the resulting pathkey list is the same as the 'needed_pathkeys',
5253 * just drop it.
5254 */
5255 if (compare_pathkeys(needed_pathkeys,
5256 useful_pathkeys) == PATHKEYS_EQUAL)
5257 return useful_pathkeys_list;
5258
5259 useful_pathkeys_list = lappend(useful_pathkeys_list,
5260 useful_pathkeys);
5261
5262 return useful_pathkeys_list;
5263}
5264
5265/*
5266 * create_ordered_paths
5267 *
5268 * Build a new upperrel containing Paths for ORDER BY evaluation.
5269 *
5270 * All paths in the result must satisfy the ORDER BY ordering.
5271 * The only new paths we need consider are an explicit full sort
5272 * and incremental sort on the cheapest-total existing path.
5273 *
5274 * input_rel: contains the source-data Paths
5275 * target: the output tlist the result Paths must emit
5276 * limit_tuples: estimated bound on the number of output tuples,
5277 * or -1 if no LIMIT or couldn't estimate
5278 *
5279 * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5280 * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5281 */
5282static RelOptInfo *
5284 RelOptInfo *input_rel,
5285 PathTarget *target,
5286 bool target_parallel_safe,
5287 double limit_tuples)
5288{
5289 Path *cheapest_input_path = input_rel->cheapest_total_path;
5290 RelOptInfo *ordered_rel;
5291 ListCell *lc;
5292
5293 /* For now, do all work in the (ORDERED, NULL) upperrel */
5294 ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5295
5296 /*
5297 * If the input relation is not parallel-safe, then the ordered relation
5298 * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5299 * target list is parallel-safe.
5300 */
5301 if (input_rel->consider_parallel && target_parallel_safe)
5302 ordered_rel->consider_parallel = true;
5303
5304 /*
5305 * If the input rel belongs to a single FDW, so does the ordered_rel.
5306 */
5307 ordered_rel->serverid = input_rel->serverid;
5308 ordered_rel->userid = input_rel->userid;
5309 ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5310 ordered_rel->fdwroutine = input_rel->fdwroutine;
5311
5312 foreach(lc, input_rel->pathlist)
5313 {
5314 Path *input_path = (Path *) lfirst(lc);
5315 Path *sorted_path;
5316 bool is_sorted;
5317 int presorted_keys;
5318
5319 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5320 input_path->pathkeys, &presorted_keys);
5321
5322 if (is_sorted)
5323 sorted_path = input_path;
5324 else
5325 {
5326 /*
5327 * Try at least sorting the cheapest path and also try
5328 * incrementally sorting any path which is partially sorted
5329 * already (no need to deal with paths which have presorted keys
5330 * when incremental sort is disabled unless it's the cheapest
5331 * input path).
5332 */
5333 if (input_path != cheapest_input_path &&
5334 (presorted_keys == 0 || !enable_incremental_sort))
5335 continue;
5336
5337 /*
5338 * We've no need to consider both a sort and incremental sort.
5339 * We'll just do a sort if there are no presorted keys and an
5340 * incremental sort when there are presorted keys.
5341 */
5342 if (presorted_keys == 0 || !enable_incremental_sort)
5343 sorted_path = (Path *) create_sort_path(root,
5344 ordered_rel,
5345 input_path,
5346 root->sort_pathkeys,
5347 limit_tuples);
5348 else
5349 sorted_path = (Path *) create_incremental_sort_path(root,
5350 ordered_rel,
5351 input_path,
5352 root->sort_pathkeys,
5353 presorted_keys,
5354 limit_tuples);
5355 }
5356
5357 /*
5358 * If the pathtarget of the result path has different expressions from
5359 * the target to be applied, a projection step is needed.
5360 */
5361 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5362 sorted_path = apply_projection_to_path(root, ordered_rel,
5363 sorted_path, target);
5364
5365 add_path(ordered_rel, sorted_path);
5366 }
5367
5368 /*
5369 * generate_gather_paths() will have already generated a simple Gather
5370 * path for the best parallel path, if any, and the loop above will have
5371 * considered sorting it. Similarly, generate_gather_paths() will also
5372 * have generated order-preserving Gather Merge plans which can be used
5373 * without sorting if they happen to match the sort_pathkeys, and the loop
5374 * above will have handled those as well. However, there's one more
5375 * possibility: it may make sense to sort the cheapest partial path or
5376 * incrementally sort any partial path that is partially sorted according
5377 * to the required output order and then use Gather Merge.
5378 */
5379 if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5380 input_rel->partial_pathlist != NIL)
5381 {
5382 Path *cheapest_partial_path;
5383
5384 cheapest_partial_path = linitial(input_rel->partial_pathlist);
5385
5386 foreach(lc, input_rel->partial_pathlist)
5387 {
5388 Path *input_path = (Path *) lfirst(lc);
5389 Path *sorted_path;
5390 bool is_sorted;
5391 int presorted_keys;
5392 double total_groups;
5393
5394 is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5395 input_path->pathkeys,
5396 &presorted_keys);
5397
5398 if (is_sorted)
5399 continue;
5400
5401 /*
5402 * Try at least sorting the cheapest path and also try
5403 * incrementally sorting any path which is partially sorted
5404 * already (no need to deal with paths which have presorted keys
5405 * when incremental sort is disabled unless it's the cheapest
5406 * partial path).
5407 */
5408 if (input_path != cheapest_partial_path &&
5409 (presorted_keys == 0 || !enable_incremental_sort))
5410 continue;
5411
5412 /*
5413 * We've no need to consider both a sort and incremental sort.
5414 * We'll just do a sort if there are no presorted keys and an
5415 * incremental sort when there are presorted keys.
5416 */
5417 if (presorted_keys == 0 || !enable_incremental_sort)
5418 sorted_path = (Path *) create_sort_path(root,
5419 ordered_rel,
5420 input_path,
5421 root->sort_pathkeys,
5422 limit_tuples);
5423 else
5424 sorted_path = (Path *) create_incremental_sort_path(root,
5425 ordered_rel,
5426 input_path,
5427 root->sort_pathkeys,
5428 presorted_keys,
5429 limit_tuples);
5430 total_groups = compute_gather_rows(sorted_path);
5431 sorted_path = (Path *)
5432 create_gather_merge_path(root, ordered_rel,
5433 sorted_path,
5434 sorted_path->pathtarget,
5435 root->sort_pathkeys, NULL,
5436 &total_groups);
5437
5438 /*
5439 * If the pathtarget of the result path has different expressions
5440 * from the target to be applied, a projection step is needed.
5441 */
5442 if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5443 sorted_path = apply_projection_to_path(root, ordered_rel,
5444 sorted_path, target);
5445
5446 add_path(ordered_rel, sorted_path);
5447 }
5448 }
5449
5450 /*
5451 * If there is an FDW that's responsible for all baserels of the query,
5452 * let it consider adding ForeignPaths.
5453 */
5454 if (ordered_rel->fdwroutine &&
5455 ordered_rel->fdwroutine->GetForeignUpperPaths)
5456 ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5457 input_rel, ordered_rel,
5458 NULL);
5459
5460 /* Let extensions possibly add some more paths */
5462 (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5463 input_rel, ordered_rel, NULL);
5464
5465 /*
5466 * No need to bother with set_cheapest here; grouping_planner does not
5467 * need us to do it.
5468 */
5469 Assert(ordered_rel->pathlist != NIL);
5470
5471 return ordered_rel;
5472}
5473
5474
5475/*
5476 * make_group_input_target
5477 * Generate appropriate PathTarget for initial input to grouping nodes.
5478 *
5479 * If there is grouping or aggregation, the scan/join subplan cannot emit
5480 * the query's final targetlist; for example, it certainly can't emit any
5481 * aggregate function calls. This routine generates the correct target
5482 * for the scan/join subplan.
5483 *
5484 * The query target list passed from the parser already contains entries
5485 * for all ORDER BY and GROUP BY expressions, but it will not have entries
5486 * for variables used only in HAVING clauses; so we need to add those
5487 * variables to the subplan target list. Also, we flatten all expressions
5488 * except GROUP BY items into their component variables; other expressions
5489 * will be computed by the upper plan nodes rather than by the subplan.
5490 * For example, given a query like
5491 * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5492 * we want to pass this targetlist to the subplan:
5493 * a+b,c,d
5494 * where the a+b target will be used by the Sort/Group steps, and the
5495 * other targets will be used for computing the final results.
5496 *
5497 * 'final_target' is the query's final target list (in PathTarget form)
5498 *
5499 * The result is the PathTarget to be computed by the Paths returned from
5500 * query_planner().
5501 */
5502static PathTarget *
5504{
5505 Query *parse = root->parse;
5506 PathTarget *input_target;
5507 List *non_group_cols;
5508 List *non_group_vars;
5509 int i;
5510 ListCell *lc;
5511
5512 /*
5513 * We must build a target containing all grouping columns, plus any other
5514 * Vars mentioned in the query's targetlist and HAVING qual.
5515 */
5516 input_target = create_empty_pathtarget();
5517 non_group_cols = NIL;
5518
5519 i = 0;
5520 foreach(lc, final_target->exprs)
5521 {
5522 Expr *expr = (Expr *) lfirst(lc);
5523 Index sgref = get_pathtarget_sortgroupref(final_target, i);
5524
5525 if (sgref && root->processed_groupClause &&
5527 root->processed_groupClause) != NULL)
5528 {
5529 /*
5530 * It's a grouping column, so add it to the input target as-is.
5531 *
5532 * Note that the target is logically below the grouping step. So
5533 * with grouping sets we need to remove the RT index of the
5534 * grouping step if there is any from the target expression.
5535 */
5536 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5537 {
5538 Assert(root->group_rtindex > 0);
5539 expr = (Expr *)
5540 remove_nulling_relids((Node *) expr,
5541 bms_make_singleton(root->group_rtindex),
5542 NULL);
5543 }
5544 add_column_to_pathtarget(input_target, expr, sgref);
5545 }
5546 else
5547 {
5548 /*
5549 * Non-grouping column, so just remember the expression for later
5550 * call to pull_var_clause.
5551 */
5552 non_group_cols = lappend(non_group_cols, expr);
5553 }
5554
5555 i++;
5556 }
5557
5558 /*
5559 * If there's a HAVING clause, we'll need the Vars it uses, too.
5560 */
5561 if (parse->havingQual)
5562 non_group_cols = lappend(non_group_cols, parse->havingQual);
5563
5564 /*
5565 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5566 * add them to the input target if not already present. (A Var used
5567 * directly as a GROUP BY item will be present already.) Note this
5568 * includes Vars used in resjunk items, so we are covering the needs of
5569 * ORDER BY and window specifications. Vars used within Aggrefs and
5570 * WindowFuncs will be pulled out here, too.
5571 *
5572 * Note that the target is logically below the grouping step. So with
5573 * grouping sets we need to remove the RT index of the grouping step if
5574 * there is any from the non-group Vars.
5575 */
5576 non_group_vars = pull_var_clause((Node *) non_group_cols,
5580 if (parse->hasGroupRTE && parse->groupingSets != NIL)
5581 {
5582 Assert(root->group_rtindex > 0);
5583 non_group_vars = (List *)
5584 remove_nulling_relids((Node *) non_group_vars,
5585 bms_make_singleton(root->group_rtindex),
5586 NULL);
5587 }
5588 add_new_columns_to_pathtarget(input_target, non_group_vars);
5589
5590 /* clean up cruft */
5591 list_free(non_group_vars);
5592 list_free(non_group_cols);
5593
5594 /* XXX this causes some redundant cost calculation ... */
5595 return set_pathtarget_cost_width(root, input_target);
5596}
5597
5598/*
5599 * make_partial_grouping_target
5600 * Generate appropriate PathTarget for output of partial aggregate
5601 * (or partial grouping, if there are no aggregates) nodes.
5602 *
5603 * A partial aggregation node needs to emit all the same aggregates that
5604 * a regular aggregation node would, plus any aggregates used in HAVING;
5605 * except that the Aggref nodes should be marked as partial aggregates.
5606 *
5607 * In addition, we'd better emit any Vars and PlaceHolderVars that are
5608 * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5609 * these would be Vars that are grouped by or used in grouping expressions.)
5610 *
5611 * grouping_target is the tlist to be emitted by the topmost aggregation step.
5612 * havingQual represents the HAVING clause.
5613 */
5614static PathTarget *
5616 PathTarget *grouping_target,
5617 Node *havingQual)
5618{
5619 PathTarget *partial_target;
5620 List *non_group_cols;
5621 List *non_group_exprs;
5622 int i;
5623 ListCell *lc;
5624
5625 partial_target = create_empty_pathtarget();
5626 non_group_cols = NIL;
5627
5628 i = 0;
5629 foreach(lc, grouping_target->exprs)
5630 {
5631 Expr *expr = (Expr *) lfirst(lc);
5632 Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5633
5634 if (sgref && root->processed_groupClause &&
5636 root->processed_groupClause) != NULL)
5637 {
5638 /*
5639 * It's a grouping column, so add it to the partial_target as-is.
5640 * (This allows the upper agg step to repeat the grouping calcs.)
5641 */
5642 add_column_to_pathtarget(partial_target, expr, sgref);
5643 }
5644 else
5645 {
5646 /*
5647 * Non-grouping column, so just remember the expression for later
5648 * call to pull_var_clause.
5649 */
5650 non_group_cols = lappend(non_group_cols, expr);
5651 }
5652
5653 i++;
5654 }
5655
5656 /*
5657 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5658 */
5659 if (havingQual)
5660 non_group_cols = lappend(non_group_cols, havingQual);
5661
5662 /*
5663 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5664 * non-group cols (plus HAVING), and add them to the partial_target if not
5665 * already present. (An expression used directly as a GROUP BY item will
5666 * be present already.) Note this includes Vars used in resjunk items, so
5667 * we are covering the needs of ORDER BY and window specifications.
5668 */
5669 non_group_exprs = pull_var_clause((Node *) non_group_cols,
5673
5674 add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5675
5676 /*
5677 * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5678 * are at the top level of the target list, so we can just scan the list
5679 * rather than recursing through the expression trees.
5680 */
5681 foreach(lc, partial_target->exprs)
5682 {
5683 Aggref *aggref = (Aggref *) lfirst(lc);
5684
5685 if (IsA(aggref, Aggref))
5686 {
5687 Aggref *newaggref;
5688
5689 /*
5690 * We shouldn't need to copy the substructure of the Aggref node,
5691 * but flat-copy the node itself to avoid damaging other trees.
5692 */
5693 newaggref = makeNode(Aggref);
5694 memcpy(newaggref, aggref, sizeof(Aggref));
5695
5696 /* For now, assume serialization is required */
5698
5699 lfirst(lc) = newaggref;
5700 }
5701 }
5702
5703 /* clean up cruft */
5704 list_free(non_group_exprs);
5705 list_free(non_group_cols);
5706
5707 /* XXX this causes some redundant cost calculation ... */
5708 return set_pathtarget_cost_width(root, partial_target);
5709}
5710
5711/*
5712 * mark_partial_aggref
5713 * Adjust an Aggref to make it represent a partial-aggregation step.
5714 *
5715 * The Aggref node is modified in-place; caller must do any copying required.
5716 */
5717void
5719{
5720 /* aggtranstype should be computed by this point */
5721 Assert(OidIsValid(agg->aggtranstype));
5722 /* ... but aggsplit should still be as the parser left it */
5723 Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5724
5725 /* Mark the Aggref with the intended partial-aggregation mode */
5726 agg->aggsplit = aggsplit;
5727
5728 /*
5729 * Adjust result type if needed. Normally, a partial aggregate returns
5730 * the aggregate's transition type; but if that's INTERNAL and we're
5731 * serializing, it returns BYTEA instead.
5732 */
5733 if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5734 {
5735 if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5736 agg->aggtype = BYTEAOID;
5737 else
5738 agg->aggtype = agg->aggtranstype;
5739 }
5740}
5741
5742/*
5743 * postprocess_setop_tlist
5744 * Fix up targetlist returned by plan_set_operations().
5745 *
5746 * We need to transpose sort key info from the orig_tlist into new_tlist.
5747 * NOTE: this would not be good enough if we supported resjunk sort keys
5748 * for results of set operations --- then, we'd need to project a whole
5749 * new tlist to evaluate the resjunk columns. For now, just ereport if we
5750 * find any resjunk columns in orig_tlist.
5751 */
5752static List *
5753postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5754{
5755 ListCell *l;
5756 ListCell *orig_tlist_item = list_head(orig_tlist);
5757
5758 foreach(l, new_tlist)
5759 {
5760 TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5761 TargetEntry *orig_tle;
5762
5763 /* ignore resjunk columns in setop result */
5764 if (new_tle->resjunk)
5765 continue;
5766
5767 Assert(orig_tlist_item != NULL);
5768 orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5769 orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5770 if (orig_tle->resjunk) /* should not happen */
5771 elog(ERROR, "resjunk output columns are not implemented");
5772 Assert(new_tle->resno == orig_tle->resno);
5773 new_tle->ressortgroupref = orig_tle->ressortgroupref;
5774 }
5775 if (orig_tlist_item != NULL)
5776 elog(ERROR, "resjunk output columns are not implemented");
5777 return new_tlist;
5778}
5779
5780/*
5781 * optimize_window_clauses
5782 * Call each WindowFunc's prosupport function to see if we're able to
5783 * make any adjustments to any of the WindowClause's so that the executor
5784 * can execute the window functions in a more optimal way.
5785 *
5786 * Currently we only allow adjustments to the WindowClause's frameOptions. We
5787 * may allow more things to be done here in the future.
5788 */
5789static void
5791{
5792 List *windowClause = root->parse->windowClause;
5793 ListCell *lc;
5794
5795 foreach(lc, windowClause)
5796 {
5798 ListCell *lc2;
5799 int optimizedFrameOptions = 0;
5800
5801 Assert(wc->winref <= wflists->maxWinRef);
5802
5803 /* skip any WindowClauses that have no WindowFuncs */
5804 if (wflists->windowFuncs[wc->winref] == NIL)
5805 continue;
5806
5807 foreach(lc2, wflists->windowFuncs[wc->winref])
5808 {
5811 WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5812 Oid prosupport;
5813
5814 prosupport = get_func_support(wfunc->winfnoid);
5815
5816 /* Check if there's a support function for 'wfunc' */
5817 if (!OidIsValid(prosupport))
5818 break; /* can't optimize this WindowClause */
5819
5820 req.type = T_SupportRequestOptimizeWindowClause;
5821 req.window_clause = wc;
5822 req.window_func = wfunc;
5823 req.frameOptions = wc->frameOptions;
5824
5825 /* call the support function */
5828 PointerGetDatum(&req)));
5829
5830 /*
5831 * Skip to next WindowClause if the support function does not
5832 * support this request type.
5833 */
5834 if (res == NULL)
5835 break;
5836
5837 /*
5838 * Save these frameOptions for the first WindowFunc for this
5839 * WindowClause.
5840 */
5841 if (foreach_current_index(lc2) == 0)
5842 optimizedFrameOptions = res->frameOptions;
5843
5844 /*
5845 * On subsequent WindowFuncs, if the frameOptions are not the same
5846 * then we're unable to optimize the frameOptions for this
5847 * WindowClause.
5848 */
5849 else if (optimizedFrameOptions != res->frameOptions)
5850 break; /* skip to the next WindowClause, if any */
5851 }
5852
5853 /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5854 if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5855 {
5856 ListCell *lc3;
5857
5858 /* apply the new frame options */
5859 wc->frameOptions = optimizedFrameOptions;
5860
5861 /*
5862 * We now check to see if changing the frameOptions has caused
5863 * this WindowClause to be a duplicate of some other WindowClause.
5864 * This can only happen if we have multiple WindowClauses, so
5865 * don't bother if there's only 1.
5866 */
5867 if (list_length(windowClause) == 1)
5868 continue;
5869
5870 /*
5871 * Do the duplicate check and reuse the existing WindowClause if
5872 * we find a duplicate.
5873 */
5874 foreach(lc3, windowClause)
5875 {
5876 WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5877
5878 /* skip over the WindowClause we're currently editing */
5879 if (existing_wc == wc)
5880 continue;
5881
5882 /*
5883 * Perform the same duplicate check that is done in
5884 * transformWindowFuncCall.
5885 */
5886 if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5887 equal(wc->orderClause, existing_wc->orderClause) &&
5888 wc->frameOptions == existing_wc->frameOptions &&
5889 equal(wc->startOffset, existing_wc->startOffset) &&
5890 equal(wc->endOffset, existing_wc->endOffset))
5891 {
5892 ListCell *lc4;
5893
5894 /*
5895 * Now move each WindowFunc in 'wc' into 'existing_wc'.
5896 * This required adjusting each WindowFunc's winref and
5897 * moving the WindowFuncs in 'wc' to the list of
5898 * WindowFuncs in 'existing_wc'.
5899 */
5900 foreach(lc4, wflists->windowFuncs[wc->winref])
5901 {
5902 WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5903
5904 wfunc->winref = existing_wc->winref;
5905 }
5906
5907 /* move list items */
5908 wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5909 wflists->windowFuncs[wc->winref]);
5910 wflists->windowFuncs[wc->winref] = NIL;
5911
5912 /*
5913 * transformWindowFuncCall() should have made sure there
5914 * are no other duplicates, so we needn't bother looking
5915 * any further.
5916 */
5917 break;
5918 }
5919 }
5920 }
5921 }
5922}
5923
5924/*
5925 * select_active_windows
5926 * Create a list of the "active" window clauses (ie, those referenced
5927 * by non-deleted WindowFuncs) in the order they are to be executed.
5928 */
5929static List *
5931{
5932 List *windowClause = root->parse->windowClause;
5933 List *result = NIL;
5934 ListCell *lc;
5935 int nActive = 0;
5937 * list_length(windowClause));
5938
5939 /* First, construct an array of the active windows */
5940 foreach(lc, windowClause)
5941 {
5943
5944 /* It's only active if wflists shows some related WindowFuncs */
5945 Assert(wc->winref <= wflists->maxWinRef);
5946 if (wflists->windowFuncs[wc->winref] == NIL)
5947 continue;
5948
5949 actives[nActive].wc = wc; /* original clause */
5950
5951 /*
5952 * For sorting, we want the list of partition keys followed by the
5953 * list of sort keys. But pathkeys construction will remove duplicates
5954 * between the two, so we can as well (even though we can't detect all
5955 * of the duplicates, since some may come from ECs - that might mean
5956 * we miss optimization chances here). We must, however, ensure that
5957 * the order of entries is preserved with respect to the ones we do
5958 * keep.
5959 *
5960 * partitionClause and orderClause had their own duplicates removed in
5961 * parse analysis, so we're only concerned here with removing
5962 * orderClause entries that also appear in partitionClause.
5963 */
5964 actives[nActive].uniqueOrder =
5966 wc->orderClause);
5967 nActive++;
5968 }
5969
5970 /*
5971 * Sort active windows by their partitioning/ordering clauses, ignoring
5972 * any framing clauses, so that the windows that need the same sorting are
5973 * adjacent in the list. When we come to generate paths, this will avoid
5974 * inserting additional Sort nodes.
5975 *
5976 * This is how we implement a specific requirement from the SQL standard,
5977 * which says that when two or more windows are order-equivalent (i.e.
5978 * have matching partition and order clauses, even if their names or
5979 * framing clauses differ), then all peer rows must be presented in the
5980 * same order in all of them. If we allowed multiple sort nodes for such
5981 * cases, we'd risk having the peer rows end up in different orders in
5982 * equivalent windows due to sort instability. (See General Rule 4 of
5983 * <window clause> in SQL2008 - SQL2016.)
5984 *
5985 * Additionally, if the entire list of clauses of one window is a prefix
5986 * of another, put first the window with stronger sorting requirements.
5987 * This way we will first sort for stronger window, and won't have to sort
5988 * again for the weaker one.
5989 */
5990 qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5991
5992 /* build ordered list of the original WindowClause nodes */
5993 for (int i = 0; i < nActive; i++)
5994 result = lappend(result, actives[i].wc);
5995
5996 pfree(actives);
5997
5998 return result;
5999}
6000
6001/*
6002 * name_active_windows
6003 * Ensure all active windows have unique names.
6004 *
6005 * The parser will have checked that user-assigned window names are unique
6006 * within the Query. Here we assign made-up names to any unnamed
6007 * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
6008 * at parse time, because it'd mess up decompilation of views.)
6009 *
6010 * activeWindows: result of select_active_windows
6011 */
6012static void
6014{
6015 int next_n = 1;
6016 char newname[16];
6017 ListCell *lc;
6018
6019 foreach(lc, activeWindows)
6020 {
6022
6023 /* Nothing to do if it has a name already. */
6024 if (wc->name)
6025 continue;
6026
6027 /* Select a name not currently present in the list. */
6028 for (;;)
6029 {
6030 ListCell *lc2;
6031
6032 snprintf(newname, sizeof(newname), "w%d", next_n++);
6033 foreach(lc2, activeWindows)
6034 {
6036
6037 if (wc2->name && strcmp(wc2->name, newname) == 0)
6038 break; /* matched */
6039 }
6040 if (lc2 == NULL)
6041 break; /* reached the end with no match */
6042 }
6043 wc->name = pstrdup(newname);
6044 }
6045}
6046
6047/*
6048 * common_prefix_cmp
6049 * QSort comparison function for WindowClauseSortData
6050 *
6051 * Sort the windows by the required sorting clauses. First, compare the sort
6052 * clauses themselves. Second, if one window's clauses are a prefix of another
6053 * one's clauses, put the window with more sort clauses first.
6054 *
6055 * We purposefully sort by the highest tleSortGroupRef first. Since
6056 * tleSortGroupRefs are assigned for the query's DISTINCT and ORDER BY first
6057 * and because here we sort the lowest tleSortGroupRefs last, if a
6058 * WindowClause is sharing a tleSortGroupRef with the query's DISTINCT or
6059 * ORDER BY clause, this makes it more likely that the final WindowAgg will
6060 * provide presorted input for the query's DISTINCT or ORDER BY clause, thus
6061 * reducing the total number of sorts required for the query.
6062 */
6063static int
6064common_prefix_cmp(const void *a, const void *b)
6065{
6066 const WindowClauseSortData *wcsa = a;
6067 const WindowClauseSortData *wcsb = b;
6068 ListCell *item_a;
6069 ListCell *item_b;
6070
6071 forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6072 {
6075
6076 if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6077 return -1;
6078 else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6079 return 1;
6080 else if (sca->sortop > scb->sortop)
6081 return -1;
6082 else if (sca->sortop < scb->sortop)
6083 return 1;
6084 else if (sca->nulls_first && !scb->nulls_first)
6085 return -1;
6086 else if (!sca->nulls_first && scb->nulls_first)
6087 return 1;
6088 /* no need to compare eqop, since it is fully determined by sortop */
6089 }
6090
6091 if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6092 return -1;
6093 else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6094 return 1;
6095
6096 return 0;
6097}
6098
6099/*
6100 * make_window_input_target
6101 * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6102 *
6103 * When the query has window functions, this function computes the desired
6104 * target to be computed by the node just below the first WindowAgg.
6105 * This tlist must contain all values needed to evaluate the window functions,
6106 * compute the final target list, and perform any required final sort step.
6107 * If multiple WindowAggs are needed, each intermediate one adds its window
6108 * function results onto this base tlist; only the topmost WindowAgg computes
6109 * the actual desired target list.
6110 *
6111 * This function is much like make_group_input_target, though not quite enough
6112 * like it to share code. As in that function, we flatten most expressions
6113 * into their component variables. But we do not want to flatten window
6114 * PARTITION BY/ORDER BY clauses, since that might result in multiple
6115 * evaluations of them, which would be bad (possibly even resulting in
6116 * inconsistent answers, if they contain volatile functions).
6117 * Also, we must not flatten GROUP BY clauses that were left unflattened by
6118 * make_group_input_target, because we may no longer have access to the
6119 * individual Vars in them.
6120 *
6121 * Another key difference from make_group_input_target is that we don't
6122 * flatten Aggref expressions, since those are to be computed below the
6123 * window functions and just referenced like Vars above that.
6124 *
6125 * 'final_target' is the query's final target list (in PathTarget form)
6126 * 'activeWindows' is the list of active windows previously identified by
6127 * select_active_windows.
6128 *
6129 * The result is the PathTarget to be computed by the plan node immediately
6130 * below the first WindowAgg node.
6131 */
6132static PathTarget *
6134 PathTarget *final_target,
6135 List *activeWindows)
6136{
6137 PathTarget *input_target;
6138 Bitmapset *sgrefs;
6139 List *flattenable_cols;
6140 List *flattenable_vars;
6141 int i;
6142 ListCell *lc;
6143
6144 Assert(root->parse->hasWindowFuncs);
6145
6146 /*
6147 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6148 * into a bitmapset for convenient reference below.
6149 */
6150 sgrefs = NULL;
6151 foreach(lc, activeWindows)
6152 {
6154 ListCell *lc2;
6155
6156 foreach(lc2, wc->partitionClause)
6157 {
6159
6160 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6161 }
6162 foreach(lc2, wc->orderClause)
6163 {
6165
6166 sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6167 }
6168 }
6169
6170 /* Add in sortgroupref numbers of GROUP BY clauses, too */
6171 foreach(lc, root->processed_groupClause)
6172 {
6174
6175 sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6176 }
6177
6178 /*
6179 * Construct a target containing all the non-flattenable targetlist items,
6180 * and save aside the others for a moment.
6181 */
6182 input_target = create_empty_pathtarget();
6183 flattenable_cols = NIL;
6184
6185 i = 0;
6186 foreach(lc, final_target->exprs)
6187 {
6188 Expr *expr = (Expr *) lfirst(lc);
6189 Index sgref = get_pathtarget_sortgroupref(final_target, i);
6190
6191 /*
6192 * Don't want to deconstruct window clauses or GROUP BY items. (Note
6193 * that such items can't contain window functions, so it's okay to
6194 * compute them below the WindowAgg nodes.)
6195 */
6196 if (sgref != 0 && bms_is_member(sgref, sgrefs))
6197 {
6198 /*
6199 * Don't want to deconstruct this value, so add it to the input
6200 * target as-is.
6201 */
6202 add_column_to_pathtarget(input_target, expr, sgref);
6203 }
6204 else
6205 {
6206 /*
6207 * Column is to be flattened, so just remember the expression for
6208 * later call to pull_var_clause.
6209 */
6210 flattenable_cols = lappend(flattenable_cols, expr);
6211 }
6212
6213 i++;
6214 }
6215
6216 /*
6217 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6218 * add them to the input target if not already present. (Some might be
6219 * there already because they're used directly as window/group clauses.)
6220 *
6221 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6222 * Aggrefs are placed in the Agg node's tlist and not left to be computed
6223 * at higher levels. On the other hand, we should recurse into
6224 * WindowFuncs to make sure their input expressions are available.
6225 */
6226 flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6230 add_new_columns_to_pathtarget(input_target, flattenable_vars);
6231
6232 /* clean up cruft */
6233 list_free(flattenable_vars);
6234 list_free(flattenable_cols);
6235
6236 /* XXX this causes some redundant cost calculation ... */
6237 return set_pathtarget_cost_width(root, input_target);
6238}
6239
6240/*
6241 * make_pathkeys_for_window
6242 * Create a pathkeys list describing the required input ordering
6243 * for the given WindowClause.
6244 *
6245 * Modifies wc's partitionClause to remove any clauses which are deemed
6246 * redundant by the pathkey logic.
6247 *
6248 * The required ordering is first the PARTITION keys, then the ORDER keys.
6249 * In the future we might try to implement windowing using hashing, in which
6250 * case the ordering could be relaxed, but for now we always sort.
6251 */
6252static List *
6254 List *tlist)
6255{
6256 List *window_pathkeys = NIL;
6257
6258 /* Throw error if can't sort */
6260 ereport(ERROR,
6261 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6262 errmsg("could not implement window PARTITION BY"),
6263 errdetail("Window partitioning columns must be of sortable datatypes.")));
6265 ereport(ERROR,
6266 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6267 errmsg("could not implement window ORDER BY"),
6268 errdetail("Window ordering columns must be of sortable datatypes.")));
6269
6270 /*
6271 * First fetch the pathkeys for the PARTITION BY clause. We can safely
6272 * remove any clauses from the wc->partitionClause for redundant pathkeys.
6273 */
6274 if (wc->partitionClause != NIL)
6275 {
6276 bool sortable;
6277
6279 &wc->partitionClause,
6280 tlist,
6281 true,
6282 false,
6283 &sortable,
6284 false);
6285
6286 Assert(sortable);
6287 }
6288
6289 /*
6290 * In principle, we could also consider removing redundant ORDER BY items
6291 * too as doing so does not alter the result of peer row checks done by
6292 * the executor. However, we must *not* remove the ordering column for
6293 * RANGE OFFSET cases, as the executor needs that for in_range tests even
6294 * if it's known to be equal to some partitioning column.
6295 */
6296 if (wc->orderClause != NIL)
6297 {
6298 List *orderby_pathkeys;
6299
6300 orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6301 wc->orderClause,
6302 tlist);
6303
6304 /* Okay, make the combined pathkeys */
6305 if (window_pathkeys != NIL)
6306 window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6307 else
6308 window_pathkeys = orderby_pathkeys;
6309 }
6310
6311 return window_pathkeys;
6312}
6313
6314/*
6315 * make_sort_input_target
6316 * Generate appropriate PathTarget for initial input to Sort step.
6317 *
6318 * If the query has ORDER BY, this function chooses the target to be computed
6319 * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6320 * project) steps. This might or might not be identical to the query's final
6321 * output target.
6322 *
6323 * The main argument for keeping the sort-input tlist the same as the final
6324 * is that we avoid a separate projection node (which will be needed if
6325 * they're different, because Sort can't project). However, there are also
6326 * advantages to postponing tlist evaluation till after the Sort: it ensures
6327 * a consistent order of evaluation for any volatile functions in the tlist,
6328 * and if there's also a LIMIT, we can stop the query without ever computing
6329 * tlist functions for later rows, which is beneficial for both volatile and
6330 * expensive functions.
6331 *
6332 * Our current policy is to postpone volatile expressions till after the sort
6333 * unconditionally (assuming that that's possible, ie they are in plain tlist
6334 * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6335 * postpone set-returning expressions, because running them beforehand would
6336 * bloat the sort dataset, and because it might cause unexpected output order
6337 * if the sort isn't stable. However there's a constraint on that: all SRFs
6338 * in the tlist should be evaluated at the same plan step, so that they can
6339 * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6340 * mustn't postpone any SRFs. (Note that in principle that policy should
6341 * probably get applied to the group/window input targetlists too, but we
6342 * have not done that historically.) Lastly, expensive expressions are
6343 * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6344 * partial evaluation of the query is possible (if neither is true, we expect
6345 * to have to evaluate the expressions for every row anyway), or if there are
6346 * any volatile or set-returning expressions (since once we've put in a
6347 * projection at all, it won't cost any more to postpone more stuff).
6348 *
6349 * Another issue that could potentially be considered here is that
6350 * evaluating tlist expressions could result in data that's either wider
6351 * or narrower than the input Vars, thus changing the volume of data that
6352 * has to go through the Sort. However, we usually have only a very bad
6353 * idea of the output width of any expression more complex than a Var,
6354 * so for now it seems too risky to try to optimize on that basis.
6355 *
6356 * Note that if we do produce a modified sort-input target, and then the
6357 * query ends up not using an explicit Sort, no particular harm is done:
6358 * we'll initially use the modified target for the preceding path nodes,
6359 * but then change them to the final target with apply_projection_to_path.
6360 * Moreover, in such a case the guarantees about evaluation order of
6361 * volatile functions still hold, since the rows are sorted already.
6362 *
6363 * This function has some things in common with make_group_input_target and
6364 * make_window_input_target, though the detailed rules for what to do are
6365 * different. We never flatten/postpone any grouping or ordering columns;
6366 * those are needed before the sort. If we do flatten a particular
6367 * expression, we leave Aggref and WindowFunc nodes alone, since those were
6368 * computed earlier.
6369 *
6370 * 'final_target' is the query's final target list (in PathTarget form)
6371 * 'have_postponed_srfs' is an output argument, see below
6372 *
6373 * The result is the PathTarget to be computed by the plan node immediately
6374 * below the Sort step (and the Distinct step, if any). This will be
6375 * exactly final_target if we decide a projection step wouldn't be helpful.
6376 *
6377 * In addition, *have_postponed_srfs is set to true if we choose to postpone
6378 * any set-returning functions to after the Sort.
6379 */
6380static PathTarget *
6382 PathTarget *final_target,
6383 bool *have_postponed_srfs)
6384{
6385 Query *parse = root->parse;
6386 PathTarget *input_target;
6387 int ncols;
6388 bool *col_is_srf;
6389 bool *postpone_col;
6390 bool have_srf;
6391 bool have_volatile;
6392 bool have_expensive;
6393 bool have_srf_sortcols;
6394 bool postpone_srfs;
6395 List *postponable_cols;
6396 List *postponable_vars;
6397 int i;
6398 ListCell *lc;
6399
6400 /* Shouldn't get here unless query has ORDER BY */
6401 Assert(parse->sortClause);
6402
6403 *have_postponed_srfs = false; /* default result */
6404
6405 /* Inspect tlist and collect per-column information */
6406 ncols = list_length(final_target->exprs);
6407 col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6408 postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6409 have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6410
6411 i = 0;
6412 foreach(lc, final_target->exprs)
6413 {
6414 Expr *expr = (Expr *) lfirst(lc);
6415
6416 /*
6417 * If the column has a sortgroupref, assume it has to be evaluated
6418 * before sorting. Generally such columns would be ORDER BY, GROUP
6419 * BY, etc targets. One exception is columns that were removed from
6420 * GROUP BY by remove_useless_groupby_columns() ... but those would
6421 * only be Vars anyway. There don't seem to be any cases where it
6422 * would be worth the trouble to double-check.
6423 */
6424 if (get_pathtarget_sortgroupref(final_target, i) == 0)
6425 {
6426 /*
6427 * Check for SRF or volatile functions. Check the SRF case first
6428 * because we must know whether we have any postponed SRFs.
6429 */
6430 if (parse->hasTargetSRFs &&
6431 expression_returns_set((Node *) expr))
6432 {
6433 /* We'll decide below whether these are postponable */
6434 col_is_srf[i] = true;
6435 have_srf = true;
6436 }
6437 else if (contain_volatile_functions((Node *) expr))
6438 {
6439 /* Unconditionally postpone */
6440 postpone_col[i] = true;
6441 have_volatile = true;
6442 }
6443 else
6444 {
6445 /*
6446 * Else check the cost. XXX it's annoying to have to do this
6447 * when set_pathtarget_cost_width() just did it. Refactor to
6448 * allow sharing the work?
6449 */
6450 QualCost cost;
6451
6452 cost_qual_eval_node(&cost, (Node *) expr, root);
6453
6454 /*
6455 * We arbitrarily define "expensive" as "more than 10X
6456 * cpu_operator_cost". Note this will take in any PL function
6457 * with default cost.
6458 */
6459 if (cost.per_tuple > 10 * cpu_operator_cost)
6460 {
6461 postpone_col[i] = true;
6462 have_expensive = true;
6463 }
6464 }
6465 }
6466 else
6467 {
6468 /* For sortgroupref cols, just check if any contain SRFs */
6469 if (!have_srf_sortcols &&
6470 parse->hasTargetSRFs &&
6471 expression_returns_set((Node *) expr))
6472 have_srf_sortcols = true;
6473 }
6474
6475 i++;
6476 }
6477
6478 /*
6479 * We can postpone SRFs if we have some but none are in sortgroupref cols.
6480 */
6481 postpone_srfs = (have_srf && !have_srf_sortcols);
6482
6483 /*
6484 * If we don't need a post-sort projection, just return final_target.
6485 */
6486 if (!(postpone_srfs || have_volatile ||
6487 (have_expensive &&
6488 (parse->limitCount || root->tuple_fraction > 0))))
6489 return final_target;
6490
6491 /*
6492 * Report whether the post-sort projection will contain set-returning
6493 * functions. This is important because it affects whether the Sort can
6494 * rely on the query's LIMIT (if any) to bound the number of rows it needs
6495 * to return.
6496 */
6497 *have_postponed_srfs = postpone_srfs;
6498
6499 /*
6500 * Construct the sort-input target, taking all non-postponable columns and
6501 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6502 * the postponable ones.
6503 */
6504 input_target = create_empty_pathtarget();
6505 postponable_cols = NIL;
6506
6507 i = 0;
6508 foreach(lc, final_target->exprs)
6509 {
6510 Expr *expr = (Expr *) lfirst(lc);
6511
6512 if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6513 postponable_cols = lappend(postponable_cols, expr);
6514 else
6515 add_column_to_pathtarget(input_target, expr,
6516 get_pathtarget_sortgroupref(final_target, i));
6517
6518 i++;
6519 }
6520
6521 /*
6522 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6523 * postponable columns, and add them to the sort-input target if not
6524 * already present. (Some might be there already.) We mustn't
6525 * deconstruct Aggrefs or WindowFuncs here, since the projection node
6526 * would be unable to recompute them.
6527 */
6528 postponable_vars = pull_var_clause((Node *) postponable_cols,
6532 add_new_columns_to_pathtarget(input_target, postponable_vars);
6533
6534 /* clean up cruft */
6535 list_free(postponable_vars);
6536 list_free(postponable_cols);
6537
6538 /* XXX this represents even more redundant cost calculation ... */
6539 return set_pathtarget_cost_width(root, input_target);
6540}
6541
6542/*
6543 * get_cheapest_fractional_path
6544 * Find the cheapest path for retrieving a specified fraction of all
6545 * the tuples expected to be returned by the given relation.
6546 *
6547 * Do not consider parameterized paths. If the caller needs a path for upper
6548 * rel, it can't have parameterized paths. If the caller needs an append
6549 * subpath, it could become limited by the treatment of similar
6550 * parameterization of all the subpaths.
6551 *
6552 * We interpret tuple_fraction the same way as grouping_planner.
6553 *
6554 * We assume set_cheapest() has been run on the given rel.
6555 */
6556Path *
6557get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6558{
6559 Path *best_path = rel->cheapest_total_path;
6560 ListCell *l;
6561
6562 /* If all tuples will be retrieved, just return the cheapest-total path */
6563 if (tuple_fraction <= 0.0)
6564 return best_path;
6565
6566 /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6567 if (tuple_fraction >= 1.0 && best_path->rows > 0)
6568 tuple_fraction /= best_path->rows;
6569
6570 foreach(l, rel->pathlist)
6571 {
6572 Path *path = (Path *) lfirst(l);
6573
6574 if (path->param_info)
6575 continue;
6576
6577 if (path == rel->cheapest_total_path ||
6578 compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6579 continue;
6580
6581 best_path = path;
6582 }
6583
6584 return best_path;
6585}
6586
6587/*
6588 * adjust_paths_for_srfs
6589 * Fix up the Paths of the given upperrel to handle tSRFs properly.
6590 *
6591 * The executor can only handle set-returning functions that appear at the
6592 * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6593 * that are not at top level, we need to split up the evaluation into multiple
6594 * plan levels in which each level satisfies this constraint. This function
6595 * modifies each Path of an upperrel that (might) compute any SRFs in its
6596 * output tlist to insert appropriate projection steps.
6597 *
6598 * The given targets and targets_contain_srfs lists are from
6599 * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6600 * target in targets.
6601 */
6602static void
6604 List *targets, List *targets_contain_srfs)
6605{
6606 ListCell *lc;
6607
6608 Assert(list_length(targets) == list_length(targets_contain_srfs));
6609 Assert(!linitial_int(targets_contain_srfs));
6610
6611 /* If no SRFs appear at this plan level, nothing to do */
6612 if (list_length(targets) == 1)
6613 return;
6614
6615 /*
6616 * Stack SRF-evaluation nodes atop each path for the rel.
6617 *
6618 * In principle we should re-run set_cheapest() here to identify the
6619 * cheapest path, but it seems unlikely that adding the same tlist eval
6620 * costs to all the paths would change that, so we don't bother. Instead,
6621 * just assume that the cheapest-startup and cheapest-total paths remain
6622 * so. (There should be no parameterized paths anymore, so we needn't
6623 * worry about updating cheapest_parameterized_paths.)
6624 */
6625 foreach(lc, rel->pathlist)
6626 {
6627 Path *subpath = (Path *) lfirst(lc);
6628 Path *newpath = subpath;
6629 ListCell *lc1,
6630 *lc2;
6631
6632 Assert(subpath->param_info == NULL);
6633 forboth(lc1, targets, lc2, targets_contain_srfs)
6634 {
6635 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6636 bool contains_srfs = (bool) lfirst_int(lc2);
6637
6638 /* If this level doesn't contain SRFs, do regular projection */
6639 if (contains_srfs)
6640 newpath = (Path *) create_set_projection_path(root,
6641 rel,
6642 newpath,
6643 thistarget);
6644 else
6645 newpath = (Path *) apply_projection_to_path(root,
6646 rel,
6647 newpath,
6648 thistarget);
6649 }
6650 lfirst(lc) = newpath;
6651 if (subpath == rel->cheapest_startup_path)
6652 rel->cheapest_startup_path = newpath;
6653 if (subpath == rel->cheapest_total_path)
6654 rel->cheapest_total_path = newpath;
6655 }
6656
6657 /* Likewise for partial paths, if any */
6658 foreach(lc, rel->partial_pathlist)
6659 {
6660 Path *subpath = (Path *) lfirst(lc);
6661 Path *newpath = subpath;
6662 ListCell *lc1,
6663 *lc2;
6664
6665 Assert(subpath->param_info == NULL);
6666 forboth(lc1, targets, lc2, targets_contain_srfs)
6667 {
6668 PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6669 bool contains_srfs = (bool) lfirst_int(lc2);
6670
6671 /* If this level doesn't contain SRFs, do regular projection */
6672 if (contains_srfs)
6673 newpath = (Path *) create_set_projection_path(root,
6674 rel,
6675 newpath,
6676 thistarget);
6677 else
6678 {
6679 /* avoid apply_projection_to_path, in case of multiple refs */
6680 newpath = (Path *) create_projection_path(root,
6681 rel,
6682 newpath,
6683 thistarget);
6684 }
6685 }
6686 lfirst(lc) = newpath;
6687 }
6688}
6689
6690/*
6691 * expression_planner
6692 * Perform planner's transformations on a standalone expression.
6693 *
6694 * Various utility commands need to evaluate expressions that are not part
6695 * of a plannable query. They can do so using the executor's regular
6696 * expression-execution machinery, but first the expression has to be fed
6697 * through here to transform it from parser output to something executable.
6698 *
6699 * Currently, we disallow sublinks in standalone expressions, so there's no
6700 * real "planning" involved here. (That might not always be true though.)
6701 * What we must do is run eval_const_expressions to ensure that any function
6702 * calls are converted to positional notation and function default arguments
6703 * get inserted. The fact that constant subexpressions get simplified is a
6704 * side-effect that is useful when the expression will get evaluated more than
6705 * once. Also, we must fix operator function IDs.
6706 *
6707 * This does not return any information about dependencies of the expression.
6708 * Hence callers should use the results only for the duration of the current
6709 * query. Callers that would like to cache the results for longer should use
6710 * expression_planner_with_deps, probably via the plancache.
6711 *
6712 * Note: this must not make any damaging changes to the passed-in expression
6713 * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6714 * we first do an expression_tree_mutator-based walk, what is returned will
6715 * be a new node tree.) The result is constructed in the current memory
6716 * context; beware that this can leak a lot of additional stuff there, too.
6717 */
6718Expr *
6720{
6721 Node *result;
6722
6723 /*
6724 * Convert named-argument function calls, insert default arguments and
6725 * simplify constant subexprs
6726 */
6727 result = eval_const_expressions(NULL, (Node *) expr);
6728
6729 /* Fill in opfuncid values if missing */
6730 fix_opfuncids(result);
6731
6732 return (Expr *) result;
6733}
6734
6735/*
6736 * expression_planner_with_deps
6737 * Perform planner's transformations on a standalone expression,
6738 * returning expression dependency information along with the result.
6739 *
6740 * This is identical to expression_planner() except that it also returns
6741 * information about possible dependencies of the expression, ie identities of
6742 * objects whose definitions affect the result. As in a PlannedStmt, these
6743 * are expressed as a list of relation Oids and a list of PlanInvalItems.
6744 */
6745Expr *
6747 List **relationOids,
6748 List **invalItems)
6749{
6750 Node *result;
6751 PlannerGlobal glob;
6753
6754 /* Make up dummy planner state so we can use setrefs machinery */
6755 MemSet(&glob, 0, sizeof(glob));
6756 glob.type = T_PlannerGlobal;
6757 glob.relationOids = NIL;
6758 glob.invalItems = NIL;
6759
6760 MemSet(&root, 0, sizeof(root));
6761 root.type = T_PlannerInfo;
6762 root.glob = &glob;
6763
6764 /*
6765 * Convert named-argument function calls, insert default arguments and
6766 * simplify constant subexprs. Collect identities of inlined functions
6767 * and elided domains, too.
6768 */
6769 result = eval_const_expressions(&root, (Node *) expr);
6770
6771 /* Fill in opfuncid values if missing */
6772 fix_opfuncids(result);
6773
6774 /*
6775 * Now walk the finished expression to find anything else we ought to
6776 * record as an expression dependency.
6777 */
6778 (void) extract_query_dependencies_walker(result, &root);
6779
6780 *relationOids = glob.relationOids;
6781 *invalItems = glob.invalItems;
6782
6783 return (Expr *) result;
6784}
6785
6786
6787/*
6788 * plan_cluster_use_sort
6789 * Use the planner to decide how CLUSTER should implement sorting
6790 *
6791 * tableOid is the OID of a table to be clustered on its index indexOid
6792 * (which is already known to be a btree index). Decide whether it's
6793 * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6794 * Return true to use sorting, false to use an indexscan.
6795 *
6796 * Note: caller had better already hold some type of lock on the table.
6797 */
6798bool
6799plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6800{
6802 Query *query;
6803 PlannerGlobal *glob;
6804 RangeTblEntry *rte;
6805 RelOptInfo *rel;
6806 IndexOptInfo *indexInfo;
6807 QualCost indexExprCost;
6808 Cost comparisonCost;
6809 Path *seqScanPath;
6810 Path seqScanAndSortPath;
6811 IndexPath *indexScanPath;
6812 ListCell *lc;
6813
6814 /* We can short-circuit the cost comparison if indexscans are disabled */
6815 if (!enable_indexscan)
6816 return true; /* use sort */
6817
6818 /* Set up mostly-dummy planner state */
6819 query = makeNode(Query);
6820 query->commandType = CMD_SELECT;
6821
6822 glob = makeNode(PlannerGlobal);
6823
6825 root->parse = query;
6826 root->glob = glob;
6827 root->query_level = 1;
6828 root->planner_cxt = CurrentMemoryContext;
6829 root->wt_param_id = -1;
6830 root->join_domains = list_make1(makeNode(JoinDomain));
6831
6832 /* Build a minimal RTE for the rel */
6833 rte = makeNode(RangeTblEntry);
6834 rte->rtekind = RTE_RELATION;
6835 rte->relid = tableOid;
6836 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6837 rte->rellockmode = AccessShareLock;
6838 rte->lateral = false;
6839 rte->inh = false;
6840 rte->inFromCl = true;
6841 query->rtable = list_make1(rte);
6842 addRTEPermissionInfo(&query->rteperminfos, rte);
6843
6844 /* Set up RTE/RelOptInfo arrays */
6846
6847 /* Build RelOptInfo */
6848 rel = build_simple_rel(root, 1, NULL);
6849
6850 /* Locate IndexOptInfo for the target index */
6851 indexInfo = NULL;
6852 foreach(lc, rel->indexlist)
6853 {
6854 indexInfo = lfirst_node(IndexOptInfo, lc);
6855 if (indexInfo->indexoid == indexOid)
6856 break;
6857 }
6858
6859 /*
6860 * It's possible that get_relation_info did not generate an IndexOptInfo
6861 * for the desired index; this could happen if it's not yet reached its
6862 * indcheckxmin usability horizon, or if it's a system index and we're
6863 * ignoring system indexes. In such cases we should tell CLUSTER to not
6864 * trust the index contents but use seqscan-and-sort.
6865 */
6866 if (lc == NULL) /* not in the list? */
6867 return true; /* use sort */
6868
6869 /*
6870 * Rather than doing all the pushups that would be needed to use
6871 * set_baserel_size_estimates, just do a quick hack for rows and width.
6872 */
6873 rel->rows = rel->tuples;
6874 rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6875
6876 root->total_table_pages = rel->pages;
6877
6878 /*
6879 * Determine eval cost of the index expressions, if any. We need to
6880 * charge twice that amount for each tuple comparison that happens during
6881 * the sort, since tuplesort.c will have to re-evaluate the index
6882 * expressions each time. (XXX that's pretty inefficient...)
6883 */
6884 cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6885 comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6886
6887 /* Estimate the cost of seq scan + sort */
6888 seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6889 cost_sort(&seqScanAndSortPath, root, NIL,
6890 seqScanPath->disabled_nodes,
6891 seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6892 comparisonCost, maintenance_work_mem, -1.0);
6893
6894 /* Estimate the cost of index scan */
6895 indexScanPath = create_index_path(root, indexInfo,
6896 NIL, NIL, NIL, NIL,
6897 ForwardScanDirection, false,
6898 NULL, 1.0, false);
6899
6900 return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6901}
6902
6903/*
6904 * plan_create_index_workers
6905 * Use the planner to decide how many parallel worker processes
6906 * CREATE INDEX should request for use
6907 *
6908 * tableOid is the table on which the index is to be built. indexOid is the
6909 * OID of an index to be created or reindexed (which must be an index with
6910 * support for parallel builds - currently btree, GIN, or BRIN).
6911 *
6912 * Return value is the number of parallel worker processes to request. It
6913 * may be unsafe to proceed if this is 0. Note that this does not include the
6914 * leader participating as a worker (value is always a number of parallel
6915 * worker processes).
6916 *
6917 * Note: caller had better already hold some type of lock on the table and
6918 * index.
6919 */
6920int
6922{
6924 Query *query;
6925 PlannerGlobal *glob;
6926 RangeTblEntry *rte;
6927 Relation heap;
6929 RelOptInfo *rel;
6930 int parallel_workers;
6931 BlockNumber heap_blocks;
6932 double reltuples;
6933 double allvisfrac;
6934
6935 /*
6936 * We don't allow performing parallel operation in standalone backend or
6937 * when parallelism is disabled.
6938 */
6940 return 0;
6941
6942 /* Set up largely-dummy planner state */
6943 query = makeNode(Query);
6944 query->commandType = CMD_SELECT;
6945
6946 glob = makeNode(PlannerGlobal);
6947
6949 root->parse = query;
6950 root->glob = glob;
6951 root->query_level = 1;
6952 root->planner_cxt = CurrentMemoryContext;
6953 root->wt_param_id = -1;
6954 root->join_domains = list_make1(makeNode(JoinDomain));
6955
6956 /*
6957 * Build a minimal RTE.
6958 *
6959 * Mark the RTE with inh = true. This is a kludge to prevent
6960 * get_relation_info() from fetching index info, which is necessary
6961 * because it does not expect that any IndexOptInfo is currently
6962 * undergoing REINDEX.
6963 */
6964 rte = makeNode(RangeTblEntry);
6965 rte->rtekind = RTE_RELATION;
6966 rte->relid = tableOid;
6967 rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6968 rte->rellockmode = AccessShareLock;
6969 rte->lateral = false;
6970 rte->inh = true;
6971 rte->inFromCl = true;
6972 query->rtable = list_make1(rte);
6973 addRTEPermissionInfo(&query->rteperminfos, rte);
6974
6975 /* Set up RTE/RelOptInfo arrays */
6977
6978 /* Build RelOptInfo */
6979 rel = build_simple_rel(root, 1, NULL);
6980
6981 /* Rels are assumed already locked by the caller */
6982 heap = table_open(tableOid, NoLock);
6983 index = index_open(indexOid, NoLock);
6984
6985 /*
6986 * Determine if it's safe to proceed.
6987 *
6988 * Currently, parallel workers can't access the leader's temporary tables.
6989 * Furthermore, any index predicate or index expressions must be parallel
6990 * safe.
6991 */
6992 if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
6995 {
6996 parallel_workers = 0;
6997 goto done;
6998 }
6999
7000 /*
7001 * If parallel_workers storage parameter is set for the table, accept that
7002 * as the number of parallel worker processes to launch (though still cap
7003 * at max_parallel_maintenance_workers). Note that we deliberately do not
7004 * consider any other factor when parallel_workers is set. (e.g., memory
7005 * use by workers.)
7006 */
7007 if (rel->rel_parallel_workers != -1)
7008 {
7009 parallel_workers = Min(rel->rel_parallel_workers,
7011 goto done;
7012 }
7013
7014 /*
7015 * Estimate heap relation size ourselves, since rel->pages cannot be
7016 * trusted (heap RTE was marked as inheritance parent)
7017 */
7018 estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7019
7020 /*
7021 * Determine number of workers to scan the heap relation using generic
7022 * model
7023 */
7024 parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7026
7027 /*
7028 * Cap workers based on available maintenance_work_mem as needed.
7029 *
7030 * Note that each tuplesort participant receives an even share of the
7031 * total maintenance_work_mem budget. Aim to leave participants
7032 * (including the leader as a participant) with no less than 32MB of
7033 * memory. This leaves cases where maintenance_work_mem is set to 64MB
7034 * immediately past the threshold of being capable of launching a single
7035 * parallel worker to sort.
7036 */
7037 while (parallel_workers > 0 &&
7038 maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7039 parallel_workers--;
7040
7041done:
7043 table_close(heap, NoLock);
7044
7045 return parallel_workers;
7046}
7047
7048/*
7049 * add_paths_to_grouping_rel
7050 *
7051 * Add non-partial paths to grouping relation.
7052 */
7053static void
7055 RelOptInfo *grouped_rel,
7056 RelOptInfo *partially_grouped_rel,
7057 const AggClauseCosts *agg_costs,
7058 grouping_sets_data *gd, double dNumGroups,
7059 GroupPathExtraData *extra)
7060{
7061 Query *parse = root->parse;
7062 Path *cheapest_path = input_rel->cheapest_total_path;
7063 ListCell *lc;
7064 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7065 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7066 List *havingQual = (List *) extra->havingQual;
7067 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7068
7069 if (can_sort)
7070 {
7071 /*
7072 * Use any available suitably-sorted path as input, and also consider
7073 * sorting the cheapest-total path and incremental sort on any paths
7074 * with presorted keys.
7075 */
7076 foreach(lc, input_rel->pathlist)
7077 {
7078 ListCell *lc2;
7079 Path *path = (Path *) lfirst(lc);
7080 Path *path_save = path;
7081 List *pathkey_orderings = NIL;
7082
7083 /* generate alternative group orderings that might be useful */
7084 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7085
7086 Assert(list_length(pathkey_orderings) > 0);
7087
7088 foreach(lc2, pathkey_orderings)
7089 {
7090 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7091
7092 /* restore the path (we replace it in the loop) */
7093 path = path_save;
7094
7095 path = make_ordered_path(root,
7096 grouped_rel,
7097 path,
7098 cheapest_path,
7099 info->pathkeys,
7100 -1.0);
7101 if (path == NULL)
7102 continue;
7103
7104 /* Now decide what to stick atop it */
7105 if (parse->groupingSets)
7106 {
7107 consider_groupingsets_paths(root, grouped_rel,
7108 path, true, can_hash,
7109 gd, agg_costs, dNumGroups);
7110 }
7111 else if (parse->hasAggs)
7112 {
7113 /*
7114 * We have aggregation, possibly with plain GROUP BY. Make
7115 * an AggPath.
7116 */
7117 add_path(grouped_rel, (Path *)
7119 grouped_rel,
7120 path,
7121 grouped_rel->reltarget,
7122 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7124 info->clauses,
7125 havingQual,
7126 agg_costs,
7127 dNumGroups));
7128 }
7129 else if (parse->groupClause)
7130 {
7131 /*
7132 * We have GROUP BY without aggregation or grouping sets.
7133 * Make a GroupPath.
7134 */
7135 add_path(grouped_rel, (Path *)
7137 grouped_rel,
7138 path,
7139 info->clauses,
7140 havingQual,
7141 dNumGroups));
7142 }
7143 else
7144 {
7145 /* Other cases should have been handled above */
7146 Assert(false);
7147 }
7148 }
7149 }
7150
7151 /*
7152 * Instead of operating directly on the input relation, we can
7153 * consider finalizing a partially aggregated path.
7154 */
7155 if (partially_grouped_rel != NULL)
7156 {
7157 foreach(lc, partially_grouped_rel->pathlist)
7158 {
7159 ListCell *lc2;
7160 Path *path = (Path *) lfirst(lc);
7161 Path *path_save = path;
7162 List *pathkey_orderings = NIL;
7163
7164 /* generate alternative group orderings that might be useful */
7165 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7166
7167 Assert(list_length(pathkey_orderings) > 0);
7168
7169 /* process all potentially interesting grouping reorderings */
7170 foreach(lc2, pathkey_orderings)
7171 {
7172 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7173
7174 /* restore the path (we replace it in the loop) */
7175 path = path_save;
7176
7177 path = make_ordered_path(root,
7178 grouped_rel,
7179 path,
7180 partially_grouped_rel->cheapest_total_path,
7181 info->pathkeys,
7182 -1.0);
7183
7184 if (path == NULL)
7185 continue;
7186
7187 if (parse->hasAggs)
7188 add_path(grouped_rel, (Path *)
7190 grouped_rel,
7191 path,
7192 grouped_rel->reltarget,
7193 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7195 info->clauses,
7196 havingQual,
7197 agg_final_costs,
7198 dNumGroups));
7199 else
7200 add_path(grouped_rel, (Path *)
7202 grouped_rel,
7203 path,
7204 info->clauses,
7205 havingQual,
7206 dNumGroups));
7207
7208 }
7209 }
7210 }
7211 }
7212
7213 if (can_hash)
7214 {
7215 if (parse->groupingSets)
7216 {
7217 /*
7218 * Try for a hash-only groupingsets path over unsorted input.
7219 */
7220 consider_groupingsets_paths(root, grouped_rel,
7221 cheapest_path, false, true,
7222 gd, agg_costs, dNumGroups);
7223 }
7224 else
7225 {
7226 /*
7227 * Generate a HashAgg Path. We just need an Agg over the
7228 * cheapest-total input path, since input order won't matter.
7229 */
7230 add_path(grouped_rel, (Path *)
7231 create_agg_path(root, grouped_rel,
7232 cheapest_path,
7233 grouped_rel->reltarget,
7234 AGG_HASHED,
7236 root->processed_groupClause,
7237 havingQual,
7238 agg_costs,
7239 dNumGroups));
7240 }
7241
7242 /*
7243 * Generate a Finalize HashAgg Path atop of the cheapest partially
7244 * grouped path, assuming there is one
7245 */
7246 if (partially_grouped_rel && partially_grouped_rel->pathlist)
7247 {
7248 Path *path = partially_grouped_rel->cheapest_total_path;
7249
7250 add_path(grouped_rel, (Path *)
7252 grouped_rel,
7253 path,
7254 grouped_rel->reltarget,
7255 AGG_HASHED,
7257 root->processed_groupClause,
7258 havingQual,
7259 agg_final_costs,
7260 dNumGroups));
7261 }
7262 }
7263
7264 /*
7265 * When partitionwise aggregate is used, we might have fully aggregated
7266 * paths in the partial pathlist, because add_paths_to_append_rel() will
7267 * consider a path for grouped_rel consisting of a Parallel Append of
7268 * non-partial paths from each child.
7269 */
7270 if (grouped_rel->partial_pathlist != NIL)
7271 gather_grouping_paths(root, grouped_rel);
7272}
7273
7274/*
7275 * create_partial_grouping_paths
7276 *
7277 * Create a new upper relation representing the result of partial aggregation
7278 * and populate it with appropriate paths. Note that we don't finalize the
7279 * lists of paths here, so the caller can add additional partial or non-partial
7280 * paths and must afterward call gather_grouping_paths and set_cheapest on
7281 * the returned upper relation.
7282 *
7283 * All paths for this new upper relation -- both partial and non-partial --
7284 * have been partially aggregated but require a subsequent FinalizeAggregate
7285 * step.
7286 *
7287 * NB: This function is allowed to return NULL if it determines that there is
7288 * no real need to create a new RelOptInfo.
7289 */
7290static RelOptInfo *
7292 RelOptInfo *grouped_rel,
7293 RelOptInfo *input_rel,
7295 GroupPathExtraData *extra,
7296 bool force_rel_creation)
7297{
7298 Query *parse = root->parse;
7299 RelOptInfo *partially_grouped_rel;
7300 AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7301 AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7302 Path *cheapest_partial_path = NULL;
7303 Path *cheapest_total_path = NULL;
7304 double dNumPartialGroups = 0;
7305 double dNumPartialPartialGroups = 0;
7306 ListCell *lc;
7307 bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7308 bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7309
7310 /*
7311 * Consider whether we should generate partially aggregated non-partial
7312 * paths. We can only do this if we have a non-partial path, and only if
7313 * the parent of the input rel is performing partial partitionwise
7314 * aggregation. (Note that extra->patype is the type of partitionwise
7315 * aggregation being used at the parent level, not this level.)
7316 */
7317 if (input_rel->pathlist != NIL &&
7319 cheapest_total_path = input_rel->cheapest_total_path;
7320
7321 /*
7322 * If parallelism is possible for grouped_rel, then we should consider
7323 * generating partially-grouped partial paths. However, if the input rel
7324 * has no partial paths, then we can't.
7325 */
7326 if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7327 cheapest_partial_path = linitial(input_rel->partial_pathlist);
7328
7329 /*
7330 * If we can't partially aggregate partial paths, and we can't partially
7331 * aggregate non-partial paths, then don't bother creating the new
7332 * RelOptInfo at all, unless the caller specified force_rel_creation.
7333 */
7334 if (cheapest_total_path == NULL &&
7335 cheapest_partial_path == NULL &&
7336 !force_rel_creation)
7337 return NULL;
7338
7339 /*
7340 * Build a new upper relation to represent the result of partially
7341 * aggregating the rows from the input relation.
7342 */
7343 partially_grouped_rel = fetch_upper_rel(root,
7345 grouped_rel->relids);
7346 partially_grouped_rel->consider_parallel =
7347 grouped_rel->consider_parallel;
7348 partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7349 partially_grouped_rel->serverid = grouped_rel->serverid;
7350 partially_grouped_rel->userid = grouped_rel->userid;
7351 partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7352 partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7353
7354 /*
7355 * Build target list for partial aggregate paths. These paths cannot just
7356 * emit the same tlist as regular aggregate paths, because (1) we must
7357 * include Vars and Aggrefs needed in HAVING, which might not appear in
7358 * the result tlist, and (2) the Aggrefs must be set in partial mode.
7359 */
7360 partially_grouped_rel->reltarget =
7362 extra->havingQual);
7363
7364 if (!extra->partial_costs_set)
7365 {
7366 /*
7367 * Collect statistics about aggregates for estimating costs of
7368 * performing aggregation in parallel.
7369 */
7370 MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7371 MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7372 if (parse->hasAggs)
7373 {
7374 /* partial phase */
7376 agg_partial_costs);
7377
7378 /* final phase */
7380 agg_final_costs);
7381 }
7382
7383 extra->partial_costs_set = true;
7384 }
7385
7386 /* Estimate number of partial groups. */
7387 if (cheapest_total_path != NULL)
7388 dNumPartialGroups =
7390 cheapest_total_path->rows,
7391 gd,
7392 extra->targetList);
7393 if (cheapest_partial_path != NULL)
7394 dNumPartialPartialGroups =
7396 cheapest_partial_path->rows,
7397 gd,
7398 extra->targetList);
7399
7400 if (can_sort && cheapest_total_path != NULL)
7401 {
7402 /* This should have been checked previously */
7403 Assert(parse->hasAggs || parse->groupClause);
7404
7405 /*
7406 * Use any available suitably-sorted path as input, and also consider
7407 * sorting the cheapest partial path.
7408 */
7409 foreach(lc, input_rel->pathlist)
7410 {
7411 ListCell *lc2;
7412 Path *path = (Path *) lfirst(lc);
7413 Path *path_save = path;
7414 List *pathkey_orderings = NIL;
7415
7416 /* generate alternative group orderings that might be useful */
7417 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7418
7419 Assert(list_length(pathkey_orderings) > 0);
7420
7421 /* process all potentially interesting grouping reorderings */
7422 foreach(lc2, pathkey_orderings)
7423 {
7424 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7425
7426 /* restore the path (we replace it in the loop) */
7427 path = path_save;
7428
7429 path = make_ordered_path(root,
7430 partially_grouped_rel,
7431 path,
7432 cheapest_total_path,
7433 info->pathkeys,
7434 -1.0);
7435
7436 if (path == NULL)
7437 continue;
7438
7439 if (parse->hasAggs)
7440 add_path(partially_grouped_rel, (Path *)
7442 partially_grouped_rel,
7443 path,
7444 partially_grouped_rel->reltarget,
7445 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7447 info->clauses,
7448 NIL,
7449 agg_partial_costs,
7450 dNumPartialGroups));
7451 else
7452 add_path(partially_grouped_rel, (Path *)
7454 partially_grouped_rel,
7455 path,
7456 info->clauses,
7457 NIL,
7458 dNumPartialGroups));
7459 }
7460 }
7461 }
7462
7463 if (can_sort && cheapest_partial_path != NULL)
7464 {
7465 /* Similar to above logic, but for partial paths. */
7466 foreach(lc, input_rel->partial_pathlist)
7467 {
7468 ListCell *lc2;
7469 Path *path = (Path *) lfirst(lc);
7470 Path *path_save = path;
7471 List *pathkey_orderings = NIL;
7472
7473 /* generate alternative group orderings that might be useful */
7474 pathkey_orderings = get_useful_group_keys_orderings(root, path);
7475
7476 Assert(list_length(pathkey_orderings) > 0);
7477
7478 /* process all potentially interesting grouping reorderings */
7479 foreach(lc2, pathkey_orderings)
7480 {
7481 GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7482
7483
7484 /* restore the path (we replace it in the loop) */
7485 path = path_save;
7486
7487 path = make_ordered_path(root,
7488 partially_grouped_rel,
7489 path,
7490 cheapest_partial_path,
7491 info->pathkeys,
7492 -1.0);
7493
7494 if (path == NULL)
7495 continue;
7496
7497 if (parse->hasAggs)
7498 add_partial_path(partially_grouped_rel, (Path *)
7500 partially_grouped_rel,
7501 path,
7502 partially_grouped_rel->reltarget,
7503 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7505 info->clauses,
7506 NIL,
7507 agg_partial_costs,
7508 dNumPartialPartialGroups));
7509 else
7510 add_partial_path(partially_grouped_rel, (Path *)
7512 partially_grouped_rel,
7513 path,
7514 info->clauses,
7515 NIL,
7516 dNumPartialPartialGroups));
7517 }
7518 }
7519 }
7520
7521 /*
7522 * Add a partially-grouped HashAgg Path where possible
7523 */
7524 if (can_hash && cheapest_total_path != NULL)
7525 {
7526 /* Checked above */
7527 Assert(parse->hasAggs || parse->groupClause);
7528
7529 add_path(partially_grouped_rel, (Path *)
7531 partially_grouped_rel,
7532 cheapest_total_path,
7533 partially_grouped_rel->reltarget,
7534 AGG_HASHED,
7536 root->processed_groupClause,
7537 NIL,
7538 agg_partial_costs,
7539 dNumPartialGroups));
7540 }
7541
7542 /*
7543 * Now add a partially-grouped HashAgg partial Path where possible
7544 */
7545 if (can_hash && cheapest_partial_path != NULL)
7546 {
7547 add_partial_path(partially_grouped_rel, (Path *)
7549 partially_grouped_rel,
7550 cheapest_partial_path,
7551 partially_grouped_rel->reltarget,
7552 AGG_HASHED,
7554 root->processed_groupClause,
7555 NIL,
7556 agg_partial_costs,
7557 dNumPartialPartialGroups));
7558 }
7559
7560 /*
7561 * If there is an FDW that's responsible for all baserels of the query,
7562 * let it consider adding partially grouped ForeignPaths.
7563 */
7564 if (partially_grouped_rel->fdwroutine &&
7565 partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7566 {
7567 FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7568
7569 fdwroutine->GetForeignUpperPaths(root,
7571 input_rel, partially_grouped_rel,
7572 extra);
7573 }
7574
7575 return partially_grouped_rel;
7576}
7577
7578/*
7579 * make_ordered_path
7580 * Return a path ordered by 'pathkeys' based on the given 'path'. May
7581 * return NULL if it doesn't make sense to generate an ordered path in
7582 * this case.
7583 */
7584static Path *
7586 Path *cheapest_path, List *pathkeys, double limit_tuples)
7587{
7588 bool is_sorted;
7589 int presorted_keys;
7590
7591 is_sorted = pathkeys_count_contained_in(pathkeys,
7592 path->pathkeys,
7593 &presorted_keys);
7594
7595 if (!is_sorted)
7596 {
7597 /*
7598 * Try at least sorting the cheapest path and also try incrementally
7599 * sorting any path which is partially sorted already (no need to deal
7600 * with paths which have presorted keys when incremental sort is
7601 * disabled unless it's the cheapest input path).
7602 */
7603 if (path != cheapest_path &&
7604 (presorted_keys == 0 || !enable_incremental_sort))
7605 return NULL;
7606
7607 /*
7608 * We've no need to consider both a sort and incremental sort. We'll
7609 * just do a sort if there are no presorted keys and an incremental
7610 * sort when there are presorted keys.
7611 */
7612 if (presorted_keys == 0 || !enable_incremental_sort)
7613 path = (Path *) create_sort_path(root,
7614 rel,
7615 path,
7616 pathkeys,
7617 limit_tuples);
7618 else
7620 rel,
7621 path,
7622 pathkeys,
7623 presorted_keys,
7624 limit_tuples);
7625 }
7626
7627 return path;
7628}
7629
7630/*
7631 * Generate Gather and Gather Merge paths for a grouping relation or partial
7632 * grouping relation.
7633 *
7634 * generate_useful_gather_paths does most of the work, but we also consider a
7635 * special case: we could try sorting the data by the group_pathkeys and then
7636 * applying Gather Merge.
7637 *
7638 * NB: This function shouldn't be used for anything other than a grouped or
7639 * partially grouped relation not only because of the fact that it explicitly
7640 * references group_pathkeys but we pass "true" as the third argument to
7641 * generate_useful_gather_paths().
7642 */
7643static void
7645{
7646 ListCell *lc;
7647 Path *cheapest_partial_path;
7648 List *groupby_pathkeys;
7649
7650 /*
7651 * This occurs after any partial aggregation has taken place, so trim off
7652 * any pathkeys added for ORDER BY / DISTINCT aggregates.
7653 */
7654 if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7655 groupby_pathkeys = list_copy_head(root->group_pathkeys,
7656 root->num_groupby_pathkeys);
7657 else
7658 groupby_pathkeys = root->group_pathkeys;
7659
7660 /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7662
7663 cheapest_partial_path = linitial(rel->partial_pathlist);
7664
7665 /* XXX Shouldn't this also consider the group-key-reordering? */
7666 foreach(lc, rel->partial_pathlist)
7667 {
7668 Path *path = (Path *) lfirst(lc);
7669 bool is_sorted;
7670 int presorted_keys;
7671 double total_groups;
7672
7673 is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7674 path->pathkeys,
7675 &presorted_keys);
7676
7677 if (is_sorted)
7678 continue;
7679
7680 /*
7681 * Try at least sorting the cheapest path and also try incrementally
7682 * sorting any path which is partially sorted already (no need to deal
7683 * with paths which have presorted keys when incremental sort is
7684 * disabled unless it's the cheapest input path).
7685 */
7686 if (path != cheapest_partial_path &&
7687 (presorted_keys == 0 || !enable_incremental_sort))
7688 continue;
7689
7690 /*
7691 * We've no need to consider both a sort and incremental sort. We'll
7692 * just do a sort if there are no presorted keys and an incremental
7693 * sort when there are presorted keys.
7694 */
7695 if (presorted_keys == 0 || !enable_incremental_sort)
7696 path = (Path *) create_sort_path(root, rel, path,
7697 groupby_pathkeys,
7698 -1.0);
7699 else
7701 rel,
7702 path,
7703 groupby_pathkeys,
7704 presorted_keys,
7705 -1.0);
7706 total_groups = compute_gather_rows(path);
7707 path = (Path *)
7709 rel,
7710 path,
7711 rel->reltarget,
7712 groupby_pathkeys,
7713 NULL,
7714 &total_groups);
7715
7716 add_path(rel, path);
7717 }
7718}
7719
7720/*
7721 * can_partial_agg
7722 *
7723 * Determines whether or not partial grouping and/or aggregation is possible.
7724 * Returns true when possible, false otherwise.
7725 */
7726static bool
7728{
7729 Query *parse = root->parse;
7730
7731 if (!parse->hasAggs && parse->groupClause == NIL)
7732 {
7733 /*
7734 * We don't know how to do parallel aggregation unless we have either
7735 * some aggregates or a grouping clause.
7736 */
7737 return false;
7738 }
7739 else if (parse->groupingSets)
7740 {
7741 /* We don't know how to do grouping sets in parallel. */
7742 return false;
7743 }
7744 else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7745 {
7746 /* Insufficient support for partial mode. */
7747 return false;
7748 }
7749
7750 /* Everything looks good. */
7751 return true;
7752}
7753
7754/*
7755 * apply_scanjoin_target_to_paths
7756 *
7757 * Adjust the final scan/join relation, and recursively all of its children,
7758 * to generate the final scan/join target. It would be more correct to model
7759 * this as a separate planning step with a new RelOptInfo at the toplevel and
7760 * for each child relation, but doing it this way is noticeably cheaper.
7761 * Maybe that problem can be solved at some point, but for now we do this.
7762 *
7763 * If tlist_same_exprs is true, then the scan/join target to be applied has
7764 * the same expressions as the existing reltarget, so we need only insert the
7765 * appropriate sortgroupref information. By avoiding the creation of
7766 * projection paths we save effort both immediately and at plan creation time.
7767 */
7768static void
7770 RelOptInfo *rel,
7771 List *scanjoin_targets,
7772 List *scanjoin_targets_contain_srfs,
7773 bool scanjoin_target_parallel_safe,
7774 bool tlist_same_exprs)
7775{
7776 bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7777 PathTarget *scanjoin_target;
7778 ListCell *lc;
7779
7780 /* This recurses, so be paranoid. */
7782
7783 /*
7784 * If the rel is partitioned, we want to drop its existing paths and
7785 * generate new ones. This function would still be correct if we kept the
7786 * existing paths: we'd modify them to generate the correct target above
7787 * the partitioning Append, and then they'd compete on cost with paths
7788 * generating the target below the Append. However, in our current cost
7789 * model the latter way is always the same or cheaper cost, so modifying
7790 * the existing paths would just be useless work. Moreover, when the cost
7791 * is the same, varying roundoff errors might sometimes allow an existing
7792 * path to be picked, resulting in undesirable cross-platform plan
7793 * variations. So we drop old paths and thereby force the work to be done
7794 * below the Append, except in the case of a non-parallel-safe target.
7795 *
7796 * Some care is needed, because we have to allow
7797 * generate_useful_gather_paths to see the old partial paths in the next
7798 * stanza. Hence, zap the main pathlist here, then allow
7799 * generate_useful_gather_paths to add path(s) to the main list, and
7800 * finally zap the partial pathlist.
7801 */
7802 if (rel_is_partitioned)
7803 rel->pathlist = NIL;
7804
7805 /*
7806 * If the scan/join target is not parallel-safe, partial paths cannot
7807 * generate it.
7808 */
7809 if (!scanjoin_target_parallel_safe)
7810 {
7811 /*
7812 * Since we can't generate the final scan/join target in parallel
7813 * workers, this is our last opportunity to use any partial paths that
7814 * exist; so build Gather path(s) that use them and emit whatever the
7815 * current reltarget is. We don't do this in the case where the
7816 * target is parallel-safe, since we will be able to generate superior
7817 * paths by doing it after the final scan/join target has been
7818 * applied.
7819 */
7821
7822 /* Can't use parallel query above this level. */
7823 rel->partial_pathlist = NIL;
7824 rel->consider_parallel = false;
7825 }
7826
7827 /* Finish dropping old paths for a partitioned rel, per comment above */
7828 if (rel_is_partitioned)
7829 rel->partial_pathlist = NIL;
7830
7831 /* Extract SRF-free scan/join target. */
7832 scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7833
7834 /*
7835 * Apply the SRF-free scan/join target to each existing path.
7836 *
7837 * If the tlist exprs are the same, we can just inject the sortgroupref
7838 * information into the existing pathtargets. Otherwise, replace each
7839 * path with a projection path that generates the SRF-free scan/join
7840 * target. This can't change the ordering of paths within rel->pathlist,
7841 * so we just modify the list in place.
7842 */
7843 foreach(lc, rel->pathlist)
7844 {
7845 Path *subpath = (Path *) lfirst(lc);
7846
7847 /* Shouldn't have any parameterized paths anymore */
7848 Assert(subpath->param_info == NULL);
7849
7850 if (tlist_same_exprs)
7851 subpath->pathtarget->sortgrouprefs =
7852 scanjoin_target->sortgrouprefs;
7853 else
7854 {
7855 Path *newpath;
7856
7857 newpath = (Path *) create_projection_path(root, rel, subpath,
7858 scanjoin_target);
7859 lfirst(lc) = newpath;
7860 }
7861 }
7862
7863 /* Likewise adjust the targets for any partial paths. */
7864 foreach(lc, rel->partial_pathlist)
7865 {
7866 Path *subpath = (Path *) lfirst(lc);
7867
7868 /* Shouldn't have any parameterized paths anymore */
7869 Assert(subpath->param_info == NULL);
7870
7871 if (tlist_same_exprs)
7872 subpath->pathtarget->sortgrouprefs =
7873 scanjoin_target->sortgrouprefs;
7874 else
7875 {
7876 Path *newpath;
7877
7878 newpath = (Path *) create_projection_path(root, rel, subpath,
7879 scanjoin_target);
7880 lfirst(lc) = newpath;
7881 }
7882 }
7883
7884 /*
7885 * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7886 * atop each existing path. (Note that this function doesn't look at the
7887 * cheapest-path fields, which is a good thing because they're bogus right
7888 * now.)
7889 */
7890 if (root->parse->hasTargetSRFs)
7892 scanjoin_targets,
7893 scanjoin_targets_contain_srfs);
7894
7895 /*
7896 * Update the rel's target to be the final (with SRFs) scan/join target.
7897 * This now matches the actual output of all the paths, and we might get
7898 * confused in createplan.c if they don't agree. We must do this now so
7899 * that any append paths made in the next part will use the correct
7900 * pathtarget (cf. create_append_path).
7901 *
7902 * Note that this is also necessary if GetForeignUpperPaths() gets called
7903 * on the final scan/join relation or on any of its children, since the
7904 * FDW might look at the rel's target to create ForeignPaths.
7905 */
7906 rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7907
7908 /*
7909 * If the relation is partitioned, recursively apply the scan/join target
7910 * to all partitions, and generate brand-new Append paths in which the
7911 * scan/join target is computed below the Append rather than above it.
7912 * Since Append is not projection-capable, that might save a separate
7913 * Result node, and it also is important for partitionwise aggregate.
7914 */
7915 if (rel_is_partitioned)
7916 {
7917 List *live_children = NIL;
7918 int i;
7919
7920 /* Adjust each partition. */
7921 i = -1;
7922 while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7923 {
7924 RelOptInfo *child_rel = rel->part_rels[i];
7925 AppendRelInfo **appinfos;
7926 int nappinfos;
7927 List *child_scanjoin_targets = NIL;
7928
7929 Assert(child_rel != NULL);
7930
7931 /* Dummy children can be ignored. */
7932 if (IS_DUMMY_REL(child_rel))
7933 continue;
7934
7935 /* Translate scan/join targets for this child. */
7936 appinfos = find_appinfos_by_relids(root, child_rel->relids,
7937 &nappinfos);
7938 foreach(lc, scanjoin_targets)
7939 {
7940 PathTarget *target = lfirst_node(PathTarget, lc);
7941
7942 target = copy_pathtarget(target);
7943 target->exprs = (List *)
7945 (Node *) target->exprs,
7946 nappinfos, appinfos);
7947 child_scanjoin_targets = lappend(child_scanjoin_targets,
7948 target);
7949 }
7950 pfree(appinfos);
7951
7952 /* Recursion does the real work. */
7954 child_scanjoin_targets,
7955 scanjoin_targets_contain_srfs,
7956 scanjoin_target_parallel_safe,
7958
7959 /* Save non-dummy children for Append paths. */
7960 if (!IS_DUMMY_REL(child_rel))
7961 live_children = lappend(live_children, child_rel);
7962 }
7963
7964 /* Build new paths for this relation by appending child paths. */
7965 add_paths_to_append_rel(root, rel, live_children);
7966 }
7967
7968 /*
7969 * Consider generating Gather or Gather Merge paths. We must only do this
7970 * if the relation is parallel safe, and we don't do it for child rels to
7971 * avoid creating multiple Gather nodes within the same plan. We must do
7972 * this after all paths have been generated and before set_cheapest, since
7973 * one of the generated paths may turn out to be the cheapest one.
7974 */
7975 if (rel->consider_parallel && !IS_OTHER_REL(rel))
7977
7978 /*
7979 * Reassess which paths are the cheapest, now that we've potentially added
7980 * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7981 * this relation.
7982 */
7983 set_cheapest(rel);
7984}
7985
7986/*
7987 * create_partitionwise_grouping_paths
7988 *
7989 * If the partition keys of input relation are part of the GROUP BY clause, all
7990 * the rows belonging to a given group come from a single partition. This
7991 * allows aggregation/grouping over a partitioned relation to be broken down
7992 * into aggregation/grouping on each partition. This should be no worse, and
7993 * often better, than the normal approach.
7994 *
7995 * However, if the GROUP BY clause does not contain all the partition keys,
7996 * rows from a given group may be spread across multiple partitions. In that
7997 * case, we perform partial aggregation for each group, append the results,
7998 * and then finalize aggregation. This is less certain to win than the
7999 * previous case. It may win if the PartialAggregate stage greatly reduces
8000 * the number of groups, because fewer rows will pass through the Append node.
8001 * It may lose if we have lots of small groups.
8002 */
8003static void
8005 RelOptInfo *input_rel,
8006 RelOptInfo *grouped_rel,
8007 RelOptInfo *partially_grouped_rel,
8008 const AggClauseCosts *agg_costs,
8011 GroupPathExtraData *extra)
8012{
8013 List *grouped_live_children = NIL;
8014 List *partially_grouped_live_children = NIL;
8015 PathTarget *target = grouped_rel->reltarget;
8016 bool partial_grouping_valid = true;
8017 int i;
8018
8021 partially_grouped_rel != NULL);
8022
8023 /* Add paths for partitionwise aggregation/grouping. */
8024 i = -1;
8025 while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8026 {
8027 RelOptInfo *child_input_rel = input_rel->part_rels[i];
8028 PathTarget *child_target;
8029 AppendRelInfo **appinfos;
8030 int nappinfos;
8031 GroupPathExtraData child_extra;
8032 RelOptInfo *child_grouped_rel;
8033 RelOptInfo *child_partially_grouped_rel;
8034
8035 Assert(child_input_rel != NULL);
8036
8037 /* Dummy children can be ignored. */
8038 if (IS_DUMMY_REL(child_input_rel))
8039 continue;
8040
8041 child_target = copy_pathtarget(target);
8042
8043 /*
8044 * Copy the given "extra" structure as is and then override the
8045 * members specific to this child.
8046 */
8047 memcpy(&child_extra, extra, sizeof(child_extra));
8048
8049 appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8050 &nappinfos);
8051
8052 child_target->exprs = (List *)
8054 (Node *) target->exprs,
8055 nappinfos, appinfos);
8056
8057 /* Translate havingQual and targetList. */
8058 child_extra.havingQual = (Node *)
8060 extra->havingQual,
8061 nappinfos, appinfos);
8062 child_extra.targetList = (List *)
8064 (Node *) extra->targetList,
8065 nappinfos, appinfos);
8066
8067 /*
8068 * extra->patype was the value computed for our parent rel; patype is
8069 * the value for this relation. For the child, our value is its
8070 * parent rel's value.
8071 */
8072 child_extra.patype = patype;
8073
8074 /*
8075 * Create grouping relation to hold fully aggregated grouping and/or
8076 * aggregation paths for the child.
8077 */
8078 child_grouped_rel = make_grouping_rel(root, child_input_rel,
8079 child_target,
8080 extra->target_parallel_safe,
8081 child_extra.havingQual);
8082
8083 /* Create grouping paths for this child relation. */
8084 create_ordinary_grouping_paths(root, child_input_rel,
8085 child_grouped_rel,
8086 agg_costs, gd, &child_extra,
8087 &child_partially_grouped_rel);
8088
8089 if (child_partially_grouped_rel)
8090 {
8091 partially_grouped_live_children =
8092 lappend(partially_grouped_live_children,
8093 child_partially_grouped_rel);
8094 }
8095 else
8096 partial_grouping_valid = false;
8097
8098 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8099 {
8100 set_cheapest(child_grouped_rel);
8101 grouped_live_children = lappend(grouped_live_children,
8102 child_grouped_rel);
8103 }
8104
8105 pfree(appinfos);
8106 }
8107
8108 /*
8109 * Try to create append paths for partially grouped children. For full
8110 * partitionwise aggregation, we might have paths in the partial_pathlist
8111 * if parallel aggregation is possible. For partial partitionwise
8112 * aggregation, we may have paths in both pathlist and partial_pathlist.
8113 *
8114 * NB: We must have a partially grouped path for every child in order to
8115 * generate a partially grouped path for this relation.
8116 */
8117 if (partially_grouped_rel && partial_grouping_valid)
8118 {
8119 Assert(partially_grouped_live_children != NIL);
8120
8121 add_paths_to_append_rel(root, partially_grouped_rel,
8122 partially_grouped_live_children);
8123
8124 /*
8125 * We need call set_cheapest, since the finalization step will use the
8126 * cheapest path from the rel.
8127 */
8128 if (partially_grouped_rel->pathlist)
8129 set_cheapest(partially_grouped_rel);
8130 }
8131
8132 /* If possible, create append paths for fully grouped children. */
8133 if (patype == PARTITIONWISE_AGGREGATE_FULL)
8134 {
8135 Assert(grouped_live_children != NIL);
8136
8137 add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8138 }
8139}
8140
8141/*
8142 * group_by_has_partkey
8143 *
8144 * Returns true if all the partition keys of the given relation are part of
8145 * the GROUP BY clauses, including having matching collation, false otherwise.
8146 */
8147static bool
8149 List *targetList,
8150 List *groupClause)
8151{
8152 List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8153 int cnt = 0;
8154 int partnatts;
8155
8156 /* Input relation should be partitioned. */
8157 Assert(input_rel->part_scheme);
8158
8159 /* Rule out early, if there are no partition keys present. */
8160 if (!input_rel->partexprs)
8161 return false;
8162
8163 partnatts = input_rel->part_scheme->partnatts;
8164
8165 for (cnt = 0; cnt < partnatts; cnt++)
8166 {
8167 List *partexprs = input_rel->partexprs[cnt];
8168 ListCell *lc;
8169 bool found = false;
8170
8171 foreach(lc, partexprs)
8172 {
8173 ListCell *lg;
8174 Expr *partexpr = lfirst(lc);
8175 Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8176
8177 foreach(lg, groupexprs)
8178 {
8179 Expr *groupexpr = lfirst(lg);
8180 Oid groupcoll = exprCollation((Node *) groupexpr);
8181
8182 /*
8183 * Note: we can assume there is at most one RelabelType node;
8184 * eval_const_expressions() will have simplified if more than
8185 * one.
8186 */
8187 if (IsA(groupexpr, RelabelType))
8188 groupexpr = ((RelabelType *) groupexpr)->arg;
8189
8190 if (equal(groupexpr, partexpr))
8191 {
8192 /*
8193 * Reject a match if the grouping collation does not match
8194 * the partitioning collation.
8195 */
8196 if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8197 partcoll != groupcoll)
8198 return false;
8199
8200 found = true;
8201 break;
8202 }
8203 }
8204
8205 if (found)
8206 break;
8207 }
8208
8209 /*
8210 * If none of the partition key expressions match with any of the
8211 * GROUP BY expression, return false.
8212 */
8213 if (!found)
8214 return false;
8215 }
8216
8217 return true;
8218}
8219
8220/*
8221 * generate_setop_child_grouplist
8222 * Build a SortGroupClause list defining the sort/grouping properties
8223 * of the child of a set operation.
8224 *
8225 * This is similar to generate_setop_grouplist() but differs as the setop
8226 * child query's targetlist entries may already have a tleSortGroupRef
8227 * assigned for other purposes, such as GROUP BYs. Here we keep the
8228 * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8229 * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8230 * any of the columns in the targetlist don't match to the setop's colTypes
8231 * then we return an empty list. This may leave some TLEs with unreferenced
8232 * ressortgroupref markings, but that's harmless.
8233 */
8234static List *
8236{
8237 List *grouplist = copyObject(op->groupClauses);
8238 ListCell *lg;
8239 ListCell *lt;
8240 ListCell *ct;
8241
8242 lg = list_head(grouplist);
8243 ct = list_head(op->colTypes);
8244 foreach(lt, targetlist)
8245 {
8246 TargetEntry *tle = (TargetEntry *) lfirst(lt);
8247 SortGroupClause *sgc;
8248 Oid coltype;
8249
8250 /* resjunk columns could have sortgrouprefs. Leave these alone */
8251 if (tle->resjunk)
8252 continue;
8253
8254 /*
8255 * We expect every non-resjunk target to have a SortGroupClause and
8256 * colTypes.
8257 */
8258 Assert(lg != NULL);
8259 Assert(ct != NULL);
8260 sgc = (SortGroupClause *) lfirst(lg);
8261 coltype = lfirst_oid(ct);
8262
8263 /* reject if target type isn't the same as the setop target type */
8264 if (coltype != exprType((Node *) tle->expr))
8265 return NIL;
8266
8267 lg = lnext(grouplist, lg);
8268 ct = lnext(op->colTypes, ct);
8269
8270 /* assign a tleSortGroupRef, or reuse the existing one */
8271 sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8272 }
8273
8274 Assert(lg == NULL);
8275 Assert(ct == NULL);
8276
8277 return grouplist;
8278}
8279
8280/*
8281 * create_unique_paths
8282 * Build a new RelOptInfo containing Paths that represent elimination of
8283 * distinct rows from the input data. Distinct-ness is defined according to
8284 * the needs of the semijoin represented by sjinfo. If it is not possible
8285 * to identify how to make the data unique, NULL is returned.
8286 *
8287 * If used at all, this is likely to be called repeatedly on the same rel,
8288 * so we cache the result.
8289 */
8290RelOptInfo *
8292{
8293 RelOptInfo *unique_rel;
8294 List *sortPathkeys = NIL;
8295 List *groupClause = NIL;
8296 MemoryContext oldcontext;
8297
8298 /* Caller made a mistake if SpecialJoinInfo is the wrong one */
8299 Assert(sjinfo->jointype == JOIN_SEMI);
8300 Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
8301
8302 /* If result already cached, return it */
8303 if (rel->unique_rel)
8304 return rel->unique_rel;
8305
8306 /* If it's not possible to unique-ify, return NULL */
8307 if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
8308 return NULL;
8309
8310 /*
8311 * Punt if this is a child relation and we failed to build a unique-ified
8312 * relation for its parent. This can happen if all the RHS columns were
8313 * found to be equated to constants when unique-ifying the parent table,
8314 * leaving no columns to unique-ify.
8315 */
8316 if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
8317 return NULL;
8318
8319 /*
8320 * When called during GEQO join planning, we are in a short-lived memory
8321 * context. We must make sure that the unique rel and any subsidiary data
8322 * structures created for a baserel survive the GEQO cycle, else the
8323 * baserel is trashed for future GEQO cycles. On the other hand, when we
8324 * are creating those for a joinrel during GEQO, we don't want them to
8325 * clutter the main planning context. Upshot is that the best solution is
8326 * to explicitly allocate memory in the same context the given RelOptInfo
8327 * is in.
8328 */
8330
8331 unique_rel = makeNode(RelOptInfo);
8332 memcpy(unique_rel, rel, sizeof(RelOptInfo));
8333
8334 /*
8335 * clear path info
8336 */
8337 unique_rel->pathlist = NIL;
8338 unique_rel->ppilist = NIL;
8339 unique_rel->partial_pathlist = NIL;
8340 unique_rel->cheapest_startup_path = NULL;
8341 unique_rel->cheapest_total_path = NULL;
8342 unique_rel->cheapest_parameterized_paths = NIL;
8343
8344 /*
8345 * Build the target list for the unique rel. We also build the pathkeys
8346 * that represent the ordering requirements for the sort-based
8347 * implementation, and the list of SortGroupClause nodes that represent
8348 * the columns to be grouped on for the hash-based implementation.
8349 *
8350 * For a child rel, we can construct these fields from those of its
8351 * parent.
8352 */
8353 if (IS_OTHER_REL(rel))
8354 {
8355 PathTarget *child_unique_target;
8356 PathTarget *parent_unique_target;
8357
8358 parent_unique_target = rel->top_parent->unique_rel->reltarget;
8359
8360 child_unique_target = copy_pathtarget(parent_unique_target);
8361
8362 /* Translate the target expressions */
8363 child_unique_target->exprs = (List *)
8365 (Node *) parent_unique_target->exprs,
8366 rel,
8367 rel->top_parent);
8368
8369 unique_rel->reltarget = child_unique_target;
8370
8371 sortPathkeys = rel->top_parent->unique_pathkeys;
8372 groupClause = rel->top_parent->unique_groupclause;
8373 }
8374 else
8375 {
8376 List *newtlist;
8377 int nextresno;
8378 List *sortList = NIL;
8379 ListCell *lc1;
8380 ListCell *lc2;
8381
8382 /*
8383 * The values we are supposed to unique-ify may be expressions in the
8384 * variables of the input rel's targetlist. We have to add any such
8385 * expressions to the unique rel's targetlist.
8386 *
8387 * To complicate matters, some of the values to be unique-ified may be
8388 * known redundant by the EquivalenceClass machinery (e.g., because
8389 * they have been equated to constants). There is no need to compare
8390 * such values during unique-ification, and indeed we had better not
8391 * try because the Vars involved may not have propagated as high as
8392 * the semijoin's level. We use make_pathkeys_for_sortclauses to
8393 * detect such cases, which is a tad inefficient but it doesn't seem
8394 * worth building specialized infrastructure for this.
8395 */
8396 newtlist = make_tlist_from_pathtarget(rel->reltarget);
8397 nextresno = list_length(newtlist) + 1;
8398
8399 forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
8400 {
8401 Expr *uniqexpr = lfirst(lc1);
8402 Oid in_oper = lfirst_oid(lc2);
8403 Oid sortop;
8404 TargetEntry *tle;
8405 bool made_tle = false;
8406
8407 tle = tlist_member(uniqexpr, newtlist);
8408 if (!tle)
8409 {
8410 tle = makeTargetEntry((Expr *) uniqexpr,
8411 nextresno,
8412 NULL,
8413 false);
8414 newtlist = lappend(newtlist, tle);
8415 nextresno++;
8416 made_tle = true;
8417 }
8418
8419 /*
8420 * Try to build an ORDER BY list to sort the input compatibly. We
8421 * do this for each sortable clause even when the clauses are not
8422 * all sortable, so that we can detect clauses that are redundant
8423 * according to the pathkey machinery.
8424 */
8425 sortop = get_ordering_op_for_equality_op(in_oper, false);
8426 if (OidIsValid(sortop))
8427 {
8428 Oid eqop;
8429 SortGroupClause *sortcl;
8430
8431 /*
8432 * The Unique node will need equality operators. Normally
8433 * these are the same as the IN clause operators, but if those
8434 * are cross-type operators then the equality operators are
8435 * the ones for the IN clause operators' RHS datatype.
8436 */
8437 eqop = get_equality_op_for_ordering_op(sortop, NULL);
8438 if (!OidIsValid(eqop)) /* shouldn't happen */
8439 elog(ERROR, "could not find equality operator for ordering operator %u",
8440 sortop);
8441
8442 sortcl = makeNode(SortGroupClause);
8443 sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8444 sortcl->eqop = eqop;
8445 sortcl->sortop = sortop;
8446 sortcl->reverse_sort = false;
8447 sortcl->nulls_first = false;
8448 sortcl->hashable = false; /* no need to make this accurate */
8449 sortList = lappend(sortList, sortcl);
8450
8451 /*
8452 * At each step, convert the SortGroupClause list to pathkey
8453 * form. If the just-added SortGroupClause is redundant, the
8454 * result will be shorter than the SortGroupClause list.
8455 */
8456 sortPathkeys = make_pathkeys_for_sortclauses(root, sortList,
8457 newtlist);
8458 if (list_length(sortPathkeys) != list_length(sortList))
8459 {
8460 /* Drop the redundant SortGroupClause */
8461 sortList = list_delete_last(sortList);
8462 Assert(list_length(sortPathkeys) == list_length(sortList));
8463 /* Undo tlist addition, if we made one */
8464 if (made_tle)
8465 {
8466 newtlist = list_delete_last(newtlist);
8467 nextresno--;
8468 }
8469 /* We need not consider this clause for hashing, either */
8470 continue;
8471 }
8472 }
8473 else if (sjinfo->semi_can_btree) /* shouldn't happen */
8474 elog(ERROR, "could not find ordering operator for equality operator %u",
8475 in_oper);
8476
8477 if (sjinfo->semi_can_hash)
8478 {
8479 /* Create a GROUP BY list for the Agg node to use */
8480 Oid eq_oper;
8481 SortGroupClause *groupcl;
8482
8483 /*
8484 * Get the hashable equality operators for the Agg node to
8485 * use. Normally these are the same as the IN clause
8486 * operators, but if those are cross-type operators then the
8487 * equality operators are the ones for the IN clause
8488 * operators' RHS datatype.
8489 */
8490 if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
8491 elog(ERROR, "could not find compatible hash operator for operator %u",
8492 in_oper);
8493
8494 groupcl = makeNode(SortGroupClause);
8495 groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8496 groupcl->eqop = eq_oper;
8497 groupcl->sortop = sortop;
8498 groupcl->reverse_sort = false;
8499 groupcl->nulls_first = false;
8500 groupcl->hashable = true;
8501 groupClause = lappend(groupClause, groupcl);
8502 }
8503 }
8504
8505 /*
8506 * Done building the sortPathkeys and groupClause. But the
8507 * sortPathkeys are bogus if not all the clauses were sortable.
8508 */
8509 if (!sjinfo->semi_can_btree)
8510 sortPathkeys = NIL;
8511
8512 /*
8513 * It can happen that all the RHS columns are equated to constants.
8514 * We'd have to do something special to unique-ify in that case, and
8515 * it's such an unlikely-in-the-real-world case that it's not worth
8516 * the effort. So just punt if we found no columns to unique-ify.
8517 */
8518 if (sortPathkeys == NIL && groupClause == NIL)
8519 {
8520 MemoryContextSwitchTo(oldcontext);
8521 return NULL;
8522 }
8523
8524 /* Convert the required targetlist back to PathTarget form */
8525 unique_rel->reltarget = create_pathtarget(root, newtlist);
8526 }
8527
8528 /* build unique paths based on input rel's pathlist */
8529 create_final_unique_paths(root, rel, sortPathkeys, groupClause,
8530 sjinfo, unique_rel);
8531
8532 /* build unique paths based on input rel's partial_pathlist */
8533 create_partial_unique_paths(root, rel, sortPathkeys, groupClause,
8534 sjinfo, unique_rel);
8535
8536 /* Now choose the best path(s) */
8537 set_cheapest(unique_rel);
8538
8539 /*
8540 * There shouldn't be any partial paths for the unique relation;
8541 * otherwise, we won't be able to properly guarantee uniqueness.
8542 */
8543 Assert(unique_rel->partial_pathlist == NIL);
8544
8545 /* Cache the result */
8546 rel->unique_rel = unique_rel;
8547 rel->unique_pathkeys = sortPathkeys;
8548 rel->unique_groupclause = groupClause;
8549
8550 MemoryContextSwitchTo(oldcontext);
8551
8552 return unique_rel;
8553}
8554
8555/*
8556 * create_final_unique_paths
8557 * Create unique paths in 'unique_rel' based on 'input_rel' pathlist
8558 */
8559static void
8561 List *sortPathkeys, List *groupClause,
8562 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8563{
8564 Path *cheapest_input_path = input_rel->cheapest_total_path;
8565
8566 /* Estimate number of output rows */
8567 unique_rel->rows = estimate_num_groups(root,
8568 sjinfo->semi_rhs_exprs,
8569 cheapest_input_path->rows,
8570 NULL,
8571 NULL);
8572
8573 /* Consider sort-based implementations, if possible. */
8574 if (sjinfo->semi_can_btree)
8575 {
8576 ListCell *lc;
8577
8578 /*
8579 * Use any available suitably-sorted path as input, and also consider
8580 * sorting the cheapest-total path and incremental sort on any paths
8581 * with presorted keys.
8582 *
8583 * To save planning time, we ignore parameterized input paths unless
8584 * they are the cheapest-total path.
8585 */
8586 foreach(lc, input_rel->pathlist)
8587 {
8588 Path *input_path = (Path *) lfirst(lc);
8589 Path *path;
8590 bool is_sorted;
8591 int presorted_keys;
8592
8593 /*
8594 * Ignore parameterized paths that are not the cheapest-total
8595 * path.
8596 */
8597 if (input_path->param_info &&
8598 input_path != cheapest_input_path)
8599 continue;
8600
8601 is_sorted = pathkeys_count_contained_in(sortPathkeys,
8602 input_path->pathkeys,
8603 &presorted_keys);
8604
8605 /*
8606 * Ignore paths that are not suitably or partially sorted, unless
8607 * they are the cheapest total path (no need to deal with paths
8608 * which have presorted keys when incremental sort is disabled).
8609 */
8610 if (!is_sorted && input_path != cheapest_input_path &&
8611 (presorted_keys == 0 || !enable_incremental_sort))
8612 continue;
8613
8614 /*
8615 * Make a separate ProjectionPath in case we need a Result node.
8616 */
8617 path = (Path *) create_projection_path(root,
8618 unique_rel,
8619 input_path,
8620 unique_rel->reltarget);
8621
8622 if (!is_sorted)
8623 {
8624 /*
8625 * We've no need to consider both a sort and incremental sort.
8626 * We'll just do a sort if there are no presorted keys and an
8627 * incremental sort when there are presorted keys.
8628 */
8629 if (presorted_keys == 0 || !enable_incremental_sort)
8630 path = (Path *) create_sort_path(root,
8631 unique_rel,
8632 path,
8633 sortPathkeys,
8634 -1.0);
8635 else
8637 unique_rel,
8638 path,
8639 sortPathkeys,
8640 presorted_keys,
8641 -1.0);
8642 }
8643
8644 path = (Path *) create_unique_path(root, unique_rel, path,
8645 list_length(sortPathkeys),
8646 unique_rel->rows);
8647
8648 add_path(unique_rel, path);
8649 }
8650 }
8651
8652 /* Consider hash-based implementation, if possible. */
8653 if (sjinfo->semi_can_hash)
8654 {
8655 Path *path;
8656
8657 /*
8658 * Make a separate ProjectionPath in case we need a Result node.
8659 */
8660 path = (Path *) create_projection_path(root,
8661 unique_rel,
8662 cheapest_input_path,
8663 unique_rel->reltarget);
8664
8665 path = (Path *) create_agg_path(root,
8666 unique_rel,
8667 path,
8668 cheapest_input_path->pathtarget,
8669 AGG_HASHED,
8671 groupClause,
8672 NIL,
8673 NULL,
8674 unique_rel->rows);
8675
8676 add_path(unique_rel, path);
8677 }
8678}
8679
8680/*
8681 * create_partial_unique_paths
8682 * Create unique paths in 'unique_rel' based on 'input_rel' partial_pathlist
8683 */
8684static void
8686 List *sortPathkeys, List *groupClause,
8687 SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8688{
8689 RelOptInfo *partial_unique_rel;
8690 Path *cheapest_partial_path;
8691
8692 /* nothing to do when there are no partial paths in the input rel */
8693 if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
8694 return;
8695
8696 /*
8697 * nothing to do if there's anything in the targetlist that's
8698 * parallel-restricted.
8699 */
8700 if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
8701 return;
8702
8703 cheapest_partial_path = linitial(input_rel->partial_pathlist);
8704
8705 partial_unique_rel = makeNode(RelOptInfo);
8706 memcpy(partial_unique_rel, input_rel, sizeof(RelOptInfo));
8707
8708 /*
8709 * clear path info
8710 */
8711 partial_unique_rel->pathlist = NIL;
8712 partial_unique_rel->ppilist = NIL;
8713 partial_unique_rel->partial_pathlist = NIL;
8714 partial_unique_rel->cheapest_startup_path = NULL;
8715 partial_unique_rel->cheapest_total_path = NULL;
8716 partial_unique_rel->cheapest_parameterized_paths = NIL;
8717
8718 /* Estimate number of output rows */
8719 partial_unique_rel->rows = estimate_num_groups(root,
8720 sjinfo->semi_rhs_exprs,
8721 cheapest_partial_path->rows,
8722 NULL,
8723 NULL);
8724 partial_unique_rel->reltarget = unique_rel->reltarget;
8725
8726 /* Consider sort-based implementations, if possible. */
8727 if (sjinfo->semi_can_btree)
8728 {
8729 ListCell *lc;
8730
8731 /*
8732 * Use any available suitably-sorted path as input, and also consider
8733 * sorting the cheapest partial path and incremental sort on any paths
8734 * with presorted keys.
8735 */
8736 foreach(lc, input_rel->partial_pathlist)
8737 {
8738 Path *input_path = (Path *) lfirst(lc);
8739 Path *path;
8740 bool is_sorted;
8741 int presorted_keys;
8742
8743 is_sorted = pathkeys_count_contained_in(sortPathkeys,
8744 input_path->pathkeys,
8745 &presorted_keys);
8746
8747 /*
8748 * Ignore paths that are not suitably or partially sorted, unless
8749 * they are the cheapest partial path (no need to deal with paths
8750 * which have presorted keys when incremental sort is disabled).
8751 */
8752 if (!is_sorted && input_path != cheapest_partial_path &&
8753 (presorted_keys == 0 || !enable_incremental_sort))
8754 continue;
8755
8756 /*
8757 * Make a separate ProjectionPath in case we need a Result node.
8758 */
8759 path = (Path *) create_projection_path(root,
8760 partial_unique_rel,
8761 input_path,
8762 partial_unique_rel->reltarget);
8763
8764 if (!is_sorted)
8765 {
8766 /*
8767 * We've no need to consider both a sort and incremental sort.
8768 * We'll just do a sort if there are no presorted keys and an
8769 * incremental sort when there are presorted keys.
8770 */
8771 if (presorted_keys == 0 || !enable_incremental_sort)
8772 path = (Path *) create_sort_path(root,
8773 partial_unique_rel,
8774 path,
8775 sortPathkeys,
8776 -1.0);
8777 else
8779 partial_unique_rel,
8780 path,
8781 sortPathkeys,
8782 presorted_keys,
8783 -1.0);
8784 }
8785
8786 path = (Path *) create_unique_path(root, partial_unique_rel, path,
8787 list_length(sortPathkeys),
8788 partial_unique_rel->rows);
8789
8790 add_partial_path(partial_unique_rel, path);
8791 }
8792 }
8793
8794 /* Consider hash-based implementation, if possible. */
8795 if (sjinfo->semi_can_hash)
8796 {
8797 Path *path;
8798
8799 /*
8800 * Make a separate ProjectionPath in case we need a Result node.
8801 */
8802 path = (Path *) create_projection_path(root,
8803 partial_unique_rel,
8804 cheapest_partial_path,
8805 partial_unique_rel->reltarget);
8806
8807 path = (Path *) create_agg_path(root,
8808 partial_unique_rel,
8809 path,
8810 cheapest_partial_path->pathtarget,
8811 AGG_HASHED,
8813 groupClause,
8814 NIL,
8815 NULL,
8816 partial_unique_rel->rows);
8817
8818 add_partial_path(partial_unique_rel, path);
8819 }
8820
8821 if (partial_unique_rel->partial_pathlist != NIL)
8822 {
8823 generate_useful_gather_paths(root, partial_unique_rel, true);
8824 set_cheapest(partial_unique_rel);
8825
8826 /*
8827 * Finally, create paths to unique-ify the final result. This step is
8828 * needed to remove any duplicates due to combining rows from parallel
8829 * workers.
8830 */
8831 create_final_unique_paths(root, partial_unique_rel,
8832 sortPathkeys, groupClause,
8833 sjinfo, unique_rel);
8834 }
8835}
@ ACLCHECK_NO_PRIV
Definition: acl.h:184
void aclcheck_error(AclResult aclerr, ObjectType objtype, const char *objectname)
Definition: aclchk.c:2652
int compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages, int max_workers)
Definition: allpaths.c:4244
void generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
Definition: allpaths.c:3230
void add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel, List *live_childrels)
Definition: allpaths.c:1321
AppendRelInfo ** find_appinfos_by_relids(PlannerInfo *root, Relids relids, int *nappinfos)
Definition: appendinfo.c:753
Node * adjust_appendrel_attrs(PlannerInfo *root, Node *node, int nappinfos, AppendRelInfo **appinfos)
Definition: appendinfo.c:200
List * adjust_inherited_attnums_multilevel(PlannerInfo *root, List *attnums, Index child_relid, Index top_parent_relid)
Definition: appendinfo.c:682
Node * adjust_appendrel_attrs_multilevel(PlannerInfo *root, Node *node, RelOptInfo *childrel, RelOptInfo *parentrel)
Definition: appendinfo.c:541
void pprint(const void *obj)
Definition: print.c:54
void pgstat_report_plan_id(int64 plan_id, bool force)
BipartiteMatchState * BipartiteMatch(int u_size, int v_size, short **adjacency)
void BipartiteMatchFree(BipartiteMatchState *state)
Bitmapset * bms_difference(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:346
Bitmapset * bms_make_singleton(int x)
Definition: bitmapset.c:216
bool bms_equal(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:142
int bms_next_member(const Bitmapset *a, int prevbit)
Definition: bitmapset.c:1306
Bitmapset * bms_del_members(Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:1161
Bitmapset * bms_del_member(Bitmapset *a, int x)
Definition: bitmapset.c:868
bool bms_is_subset(const Bitmapset *a, const Bitmapset *b)
Definition: bitmapset.c:412
void bms_free(Bitmapset *a)
Definition: bitmapset.c:239
int bms_num_members(const Bitmapset *a)
Definition: bitmapset.c:751
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:510
Bitmapset * bms_add_member(Bitmapset *a, int x)
Definition: bitmapset.c:815
BMS_Membership bms_membership(const Bitmapset *a)
Definition: bitmapset.c:781
bool bms_overlap_list(const Bitmapset *a, const List *b)
Definition: bitmapset.c:608
#define bms_is_empty(a)
Definition: bitmapset.h:118
@ BMS_MULTIPLE
Definition: bitmapset.h:73
uint32 BlockNumber
Definition: block.h:31
#define Min(x, y)
Definition: c.h:1004
#define Max(x, y)
Definition: c.h:998
int64_t int64
Definition: c.h:536
unsigned int Index
Definition: c.h:620
#define MemSet(start, val, len)
Definition: c.h:1020
#define OidIsValid(objectId)
Definition: c.h:775
size_t Size
Definition: c.h:611
bool contain_agg_clause(Node *clause)
Definition: clauses.c:182
Node * estimate_expression_value(PlannerInfo *root, Node *node)
Definition: clauses.c:2403
WindowFuncLists * find_window_functions(Node *clause, Index maxWinRef)
Definition: clauses.c:232
Node * eval_const_expressions(PlannerInfo *root, Node *node)
Definition: clauses.c:2262
void convert_saop_to_hashed_saop(Node *node)
Definition: clauses.c:2295
char max_parallel_hazard(Query *parse)
Definition: clauses.c:738
bool is_parallel_safe(PlannerInfo *root, Node *node)
Definition: clauses.c:757
bool contain_subplans(Node *clause)
Definition: clauses.c:334
bool contain_volatile_functions(Node *clause)
Definition: clauses.c:542
double cpu_operator_cost
Definition: costsize.c:134
bool enable_partitionwise_aggregate
Definition: costsize.c:160
int max_parallel_workers_per_gather
Definition: costsize.c:143
double parallel_setup_cost
Definition: costsize.c:136
double parallel_tuple_cost
Definition: costsize.c:135
void cost_sort(Path *path, PlannerInfo *root, List *pathkeys, int input_disabled_nodes, Cost input_cost, double tuples, int width, Cost comparison_cost, int sort_mem, double limit_tuples)
Definition: costsize.c:2144
double compute_gather_rows(Path *path)
Definition: costsize.c:6660
void cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
Definition: costsize.c:4817
PathTarget * set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
Definition: costsize.c:6402
void cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
Definition: costsize.c:4791
bool enable_presorted_aggregate
Definition: costsize.c:164
bool enable_hashagg
Definition: costsize.c:152
int32 clamp_width_est(int64 tuple_width)
Definition: costsize.c:242
bool enable_indexscan
Definition: costsize.c:146
bool enable_incremental_sort
Definition: costsize.c:151
Plan * materialize_finished_plan(Plan *subplan)
Definition: createplan.c:6511
Plan * create_plan(PlannerInfo *root, Path *best_path)
Definition: createplan.c:341
int errdetail(const char *fmt,...)
Definition: elog.c:1207
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
#define ereport(elevel,...)
Definition: elog.h:150
bool equal(const void *a, const void *b)
Definition: equalfuncs.c:223
bool ExecSupportsBackwardScan(Plan *node)
Definition: execAmi.c:511
bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
Definition: execMain.c:646
#define OidFunctionCall1(functionId, arg1)
Definition: fmgr.h:720
FdwRoutine * GetFdwRoutineByRelId(Oid relid)
Definition: foreign.c:420
int max_parallel_maintenance_workers
Definition: globals.c:134
bool IsUnderPostmaster
Definition: globals.c:120
int maintenance_work_mem
Definition: globals.c:133
Assert(PointerIsAligned(start, uint64))
#define IsParallelWorker()
Definition: parallel.h:60
void index_close(Relation relation, LOCKMODE lockmode)
Definition: indexam.c:177
Relation index_open(Oid relationId, LOCKMODE lockmode)
Definition: indexam.c:133
int b
Definition: isn.c:74
int a
Definition: isn.c:73
int j
Definition: isn.c:78
int i
Definition: isn.c:77
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
double jit_optimize_above_cost
Definition: jit.c:41
bool jit_enabled
Definition: jit.c:32
bool jit_expressions
Definition: jit.c:36
bool jit_tuple_deforming
Definition: jit.c:38
double jit_above_cost
Definition: jit.c:39
double jit_inline_above_cost
Definition: jit.c:40
#define PGJIT_OPT3
Definition: jit.h:21
#define PGJIT_NONE
Definition: jit.h:19
#define PGJIT_EXPR
Definition: jit.h:23
#define PGJIT_DEFORM
Definition: jit.h:24
#define PGJIT_INLINE
Definition: jit.h:22
#define PGJIT_PERFORM
Definition: jit.h:20
Bitmapset * DiscreteKnapsack(int max_weight, int num_items, int *item_weights, double *item_values)
Definition: knapsack.c:52
List * lappend(List *list, void *datum)
Definition: list.c:339
List * list_difference_int(const List *list1, const List *list2)
Definition: list.c:1288
List * list_concat_unique_ptr(List *list1, const List *list2)
Definition: list.c:1427
List * list_concat(List *list1, const List *list2)
Definition: list.c:561
List * list_copy(const List *oldlist)
Definition: list.c:1573
List * lappend_int(List *list, int datum)
Definition: list.c:357
List * lcons(void *datum, List *list)
Definition: list.c:495
List * list_delete_int(List *list, int datum)
Definition: list.c:891
List * list_delete_last(List *list)
Definition: list.c:957
bool list_member_ptr(const List *list, const void *datum)
Definition: list.c:682
void list_free(List *list)
Definition: list.c:1546
bool list_member_int(const List *list, int datum)
Definition: list.c:702
List * list_copy_head(const List *oldlist, int len)
Definition: list.c:1593
List * list_concat_unique(List *list1, const List *list2)
Definition: list.c:1405
#define NoLock
Definition: lockdefs.h:34
#define AccessShareLock
Definition: lockdefs.h:36
@ LockWaitBlock
Definition: lockoptions.h:39
LockClauseStrength
Definition: lockoptions.h:22
@ LCS_FORUPDATE
Definition: lockoptions.h:27
@ LCS_NONE
Definition: lockoptions.h:23
@ LCS_FORSHARE
Definition: lockoptions.h:25
@ LCS_FORKEYSHARE
Definition: lockoptions.h:24
@ LCS_FORNOKEYUPDATE
Definition: lockoptions.h:26
char * get_rel_name(Oid relid)
Definition: lsyscache.c:2095
bool get_compatible_hash_operators(Oid opno, Oid *lhs_opno, Oid *rhs_opno)
Definition: lsyscache.c:482
RegProcedure get_func_support(Oid funcid)
Definition: lsyscache.c:2025
Oid get_equality_op_for_ordering_op(Oid opno, bool *reverse)
Definition: lsyscache.c:331
Oid get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
Definition: lsyscache.c:369
int32 get_typavgwidth(Oid typid, int32 typmod)
Definition: lsyscache.c:2745
Datum subpath(PG_FUNCTION_ARGS)
Definition: ltree_op.c:311
TargetEntry * makeTargetEntry(Expr *expr, AttrNumber resno, char *resname, bool resjunk)
Definition: makefuncs.c:289
Expr * make_opclause(Oid opno, Oid opresulttype, bool opretset, Expr *leftop, Expr *rightop, Oid opcollid, Oid inputcollid)
Definition: makefuncs.c:701
Const * makeConst(Oid consttype, int32 consttypmod, Oid constcollid, int constlen, Datum constvalue, bool constisnull, bool constbyval)
Definition: makefuncs.c:350
List * make_ands_implicit(Expr *clause)
Definition: makefuncs.c:810
char * pstrdup(const char *in)
Definition: mcxt.c:1759
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc0(Size size)
Definition: mcxt.c:1395
void * palloc(Size size)
Definition: mcxt.c:1365
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
MemoryContext GetMemoryChunkContext(void *pointer)
Definition: mcxt.c:753
Oid exprType(const Node *expr)
Definition: nodeFuncs.c:42
Oid exprCollation(const Node *expr)
Definition: nodeFuncs.c:821
bool expression_returns_set(Node *clause)
Definition: nodeFuncs.c:763
void fix_opfuncids(Node *node)
Definition: nodeFuncs.c:1841
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3615
#define DO_AGGSPLIT_SKIPFINAL(as)
Definition: nodes.h:396
#define IsA(nodeptr, _type_)
Definition: nodes.h:164
#define copyObject(obj)
Definition: nodes.h:232
double Cost
Definition: nodes.h:261
#define nodeTag(nodeptr)
Definition: nodes.h:139
#define IS_OUTER_JOIN(jointype)
Definition: nodes.h:348
@ CMD_MERGE
Definition: nodes.h:279
@ CMD_DELETE
Definition: nodes.h:278
@ CMD_UPDATE
Definition: nodes.h:276
@ CMD_SELECT
Definition: nodes.h:275
AggStrategy
Definition: nodes.h:363
@ AGG_SORTED
Definition: nodes.h:365
@ AGG_HASHED
Definition: nodes.h:366
@ AGG_MIXED
Definition: nodes.h:367
@ AGG_PLAIN
Definition: nodes.h:364
#define DO_AGGSPLIT_SERIALIZE(as)
Definition: nodes.h:397
AggSplit
Definition: nodes.h:385
@ AGGSPLIT_FINAL_DESERIAL
Definition: nodes.h:391
@ AGGSPLIT_SIMPLE
Definition: nodes.h:387
@ AGGSPLIT_INITIAL_SERIAL
Definition: nodes.h:389
@ LIMIT_OPTION_COUNT
Definition: nodes.h:441
#define makeNode(_type_)
Definition: nodes.h:161
#define castNode(_type_, nodeptr)
Definition: nodes.h:182
@ JOIN_SEMI
Definition: nodes.h:317
#define PVC_RECURSE_AGGREGATES
Definition: optimizer.h:184
#define PVC_RECURSE_WINDOWFUNCS
Definition: optimizer.h:186
@ DEBUG_PARALLEL_REGRESS
Definition: optimizer.h:97
@ DEBUG_PARALLEL_OFF
Definition: optimizer.h:95
#define PVC_INCLUDE_WINDOWFUNCS
Definition: optimizer.h:185
#define PVC_INCLUDE_PLACEHOLDERS
Definition: optimizer.h:187
#define PVC_INCLUDE_AGGREGATES
Definition: optimizer.h:183
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
int assign_special_exec_param(PlannerInfo *root)
Definition: paramassign.c:754
List * expand_grouping_sets(List *groupingSets, bool groupDistinct, int limit)
Definition: parse_agg.c:1920
Index assignSortGroupRef(TargetEntry *tle, List *tlist)
RTEPermissionInfo * getRTEPermissionInfo(List *rteperminfos, RangeTblEntry *rte)
RTEPermissionInfo * addRTEPermissionInfo(List **rteperminfos, RangeTblEntry *rte)
#define CURSOR_OPT_SCROLL
Definition: parsenodes.h:3384
#define CURSOR_OPT_FAST_PLAN
Definition: parsenodes.h:3390
@ RTE_JOIN
Definition: parsenodes.h:1043
@ RTE_VALUES
Definition: parsenodes.h:1046
@ RTE_SUBQUERY
Definition: parsenodes.h:1042
@ RTE_RESULT
Definition: parsenodes.h:1049
@ RTE_FUNCTION
Definition: parsenodes.h:1044
@ RTE_TABLEFUNC
Definition: parsenodes.h:1045
@ RTE_GROUP
Definition: parsenodes.h:1052
@ RTE_RELATION
Definition: parsenodes.h:1041
@ OBJECT_VIEW
Definition: parsenodes.h:2373
#define CURSOR_OPT_PARALLEL_OK
Definition: parsenodes.h:3393
void CheckSelectLocking(Query *qry, LockClauseStrength strength)
Definition: analyze.c:3343
const char * LCS_asString(LockClauseStrength strength)
Definition: analyze.c:3318
#define rt_fetch(rangetable_index, rangetable)
Definition: parsetree.h:31
void DestroyPartitionDirectory(PartitionDirectory pdir)
Definition: partdesc.c:484
List * append_pathkeys(List *target, List *source)
Definition: pathkeys.c:107
bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common)
Definition: pathkeys.c:558
List * make_pathkeys_for_sortclauses(PlannerInfo *root, List *sortclauses, List *tlist)
Definition: pathkeys.c:1336
List * make_pathkeys_for_sortclauses_extended(PlannerInfo *root, List **sortclauses, List *tlist, bool remove_redundant, bool remove_group_rtindex, bool *sortable, bool set_ec_sortref)
Definition: pathkeys.c:1381
bool pathkeys_contained_in(List *keys1, List *keys2)
Definition: pathkeys.c:343
PathKeysComparison compare_pathkeys(List *keys1, List *keys2)
Definition: pathkeys.c:304
List * get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
Definition: pathkeys.c:467
IndexPath * create_index_path(PlannerInfo *root, IndexOptInfo *index, List *indexclauses, List *indexorderbys, List *indexorderbycols, List *pathkeys, ScanDirection indexscandir, bool indexonly, Relids required_outer, double loop_count, bool partial_path)
Definition: pathnode.c:1047
ProjectSetPath * create_set_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2720
ProjectionPath * create_projection_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target)
Definition: pathnode.c:2522
WindowAggPath * create_windowagg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *windowFuncs, List *runCondition, WindowClause *winclause, List *qual, bool topwindow)
Definition: pathnode.c:3328
LockRowsPath * create_lockrows_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *rowMarks, int epqParam)
Definition: pathnode.c:3564
Path * apply_projection_to_path(PlannerInfo *root, RelOptInfo *rel, Path *path, PathTarget *target)
Definition: pathnode.c:2631
Path * create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer, int parallel_workers)
Definition: pathnode.c:981
GatherMergePath * create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, List *pathkeys, Relids required_outer, double *rows)
Definition: pathnode.c:1748
void set_cheapest(RelOptInfo *parent_rel)
Definition: pathnode.c:268
void add_partial_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:793
LimitPath * create_limit_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, Node *limitOffset, Node *limitCount, LimitOption limitOption, int64 offset_est, int64 count_est)
Definition: pathnode.c:3730
AppendPath * create_append_path(PlannerInfo *root, RelOptInfo *rel, List *subpaths, List *partial_subpaths, List *pathkeys, Relids required_outer, int parallel_workers, bool parallel_aware, double rows)
Definition: pathnode.c:1298
int compare_fractional_path_costs(Path *path1, Path *path2, double fraction)
Definition: pathnode.c:123
IncrementalSortPath * create_incremental_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, int presorted_keys, double limit_tuples)
Definition: pathnode.c:2790
GroupingSetsPath * create_groupingsets_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *having_qual, AggStrategy aggstrategy, List *rollups, const AggClauseCosts *agg_costs)
Definition: pathnode.c:3074
SortPath * create_sort_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *pathkeys, double limit_tuples)
Definition: pathnode.c:2839
GroupPath * create_group_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, List *groupClause, List *qual, double numGroups)
Definition: pathnode.c:2883
void add_path(RelOptInfo *parent_rel, Path *new_path)
Definition: pathnode.c:459
UniquePath * create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, int numCols, double numGroups)
Definition: pathnode.c:2940
AggPath * create_agg_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, PathTarget *target, AggStrategy aggstrategy, AggSplit aggsplit, List *groupClause, List *qual, const AggClauseCosts *aggcosts, double numGroups)
Definition: pathnode.c:2992
ModifyTablePath * create_modifytable_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, CmdType operation, bool canSetTag, Index nominalRelation, Index rootRelation, bool partColsUpdated, List *resultRelations, List *updateColnosLists, List *withCheckOptionLists, List *returningLists, List *rowMarks, OnConflictExpr *onconflict, List *mergeActionLists, List *mergeJoinConditions, int epqParam)
Definition: pathnode.c:3628
GroupResultPath * create_group_result_path(PlannerInfo *root, RelOptInfo *rel, PathTarget *target, List *havingqual)
Definition: pathnode.c:1607
PartitionwiseAggregateType
Definition: pathnodes.h:3404
@ PARTITIONWISE_AGGREGATE_PARTIAL
Definition: pathnodes.h:3407
@ PARTITIONWISE_AGGREGATE_FULL
Definition: pathnodes.h:3406
@ PARTITIONWISE_AGGREGATE_NONE
Definition: pathnodes.h:3405
#define IS_DUMMY_REL(r)
Definition: pathnodes.h:2105
#define GROUPING_CAN_USE_HASH
Definition: pathnodes.h:3389
#define get_pathtarget_sortgroupref(target, colno)
Definition: pathnodes.h:1707
#define IS_PARTITIONED_REL(rel)
Definition: pathnodes.h:1104
#define GROUPING_CAN_USE_SORT
Definition: pathnodes.h:3388
#define GROUPING_CAN_PARTIAL_AGG
Definition: pathnodes.h:3390
@ UPPERREL_GROUP_AGG
Definition: pathnodes.h:74
@ UPPERREL_FINAL
Definition: pathnodes.h:79
@ UPPERREL_DISTINCT
Definition: pathnodes.h:77
@ UPPERREL_PARTIAL_GROUP_AGG
Definition: pathnodes.h:72
@ UPPERREL_ORDERED
Definition: pathnodes.h:78
@ UPPERREL_WINDOW
Definition: pathnodes.h:75
@ UPPERREL_PARTIAL_DISTINCT
Definition: pathnodes.h:76
@ RELOPT_OTHER_UPPER_REL
Definition: pathnodes.h:869
#define IS_OTHER_REL(rel)
Definition: pathnodes.h:891
@ PATHKEYS_BETTER2
Definition: paths.h:210
@ PATHKEYS_BETTER1
Definition: paths.h:209
@ PATHKEYS_DIFFERENT
Definition: paths.h:211
@ PATHKEYS_EQUAL
Definition: paths.h:208
void * arg
#define lfirst(lc)
Definition: pg_list.h:172
#define lfirst_node(type, lc)
Definition: pg_list.h:176
static int list_length(const List *l)
Definition: pg_list.h:152
#define linitial_node(type, l)
Definition: pg_list.h:181
#define NIL
Definition: pg_list.h:68
#define forboth(cell1, list1, cell2, list2)
Definition: pg_list.h:518
#define foreach_current_index(var_or_cell)
Definition: pg_list.h:403
#define lfirst_int(lc)
Definition: pg_list.h:173
#define list_make1(x1)
Definition: pg_list.h:212
#define linitial_int(l)
Definition: pg_list.h:179
#define for_each_cell(cell, lst, initcell)
Definition: pg_list.h:438
#define for_each_from(cell, lst, N)
Definition: pg_list.h:414
static void * list_nth(const List *list, int n)
Definition: pg_list.h:299
#define linitial(l)
Definition: pg_list.h:178
#define foreach_node(type, var, lst)
Definition: pg_list.h:496
static ListCell * list_head(const List *l)
Definition: pg_list.h:128
#define list_nth_node(type, list, n)
Definition: pg_list.h:327
static ListCell * lnext(const List *l, const ListCell *c)
Definition: pg_list.h:343
#define list_make1_int(x1)
Definition: pg_list.h:227
#define lfirst_oid(lc)
Definition: pg_list.h:174
static int list_cell_number(const List *l, const ListCell *c)
Definition: pg_list.h:333
#define llast_node(type, l)
Definition: pg_list.h:202
static int scale
Definition: pgbench.c:182
void preprocess_minmax_aggregates(PlannerInfo *root)
Definition: planagg.c:73
void estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac)
Definition: plancat.c:1156
int32 get_relation_data_width(Oid relid, int32 *attr_widths)
Definition: plancat.c:1323
RelOptInfo * query_planner(PlannerInfo *root, query_pathkeys_callback qp_callback, void *qp_extra)
Definition: planmain.c:54
#define DEFAULT_CURSOR_TUPLE_FRACTION
Definition: planmain.h:21
#define EXPRKIND_TABLEFUNC_LATERAL
Definition: planner.c:93
static RelOptInfo * create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *distinct_rel)
Definition: planner.c:5018
static List * postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Definition: planner.c:5753
static PathTarget * make_partial_grouping_target(PlannerInfo *root, PathTarget *grouping_target, Node *havingQual)
Definition: planner.c:5615
Expr * expression_planner_with_deps(Expr *expr, List **relationOids, List **invalItems)
Definition: planner.c:6746
#define EXPRKIND_TARGET
Definition: planner.c:82
#define EXPRKIND_APPINFO
Definition: planner.c:88
static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
Definition: planner.c:7644
static void preprocess_rowmarks(PlannerInfo *root)
Definition: planner.c:2374
#define EXPRKIND_TABLESAMPLE
Definition: planner.c:90
PlannedStmt * planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:293
static void create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel)
Definition: planner.c:3942
#define EXPRKIND_GROUPEXPR
Definition: planner.c:94
planner_hook_type planner_hook
Definition: planner.c:74
double cursor_tuple_fraction
Definition: planner.c:68
static bool is_degenerate_grouping(PlannerInfo *root)
Definition: planner.c:3921
bool plan_cluster_use_sort(Oid tableOid, Oid indexOid)
Definition: planner.c:6799
static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
Definition: planner.c:1335
int plan_create_index_workers(Oid tableOid, Oid indexOid)
Definition: planner.c:6921
#define EXPRKIND_PHV
Definition: planner.c:89
#define EXPRKIND_RTFUNC_LATERAL
Definition: planner.c:84
#define EXPRKIND_VALUES_LATERAL
Definition: planner.c:86
static void create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, GroupPathExtraData *extra, RelOptInfo **partially_grouped_rel_p)
Definition: planner.c:4006
RelOptInfo * create_unique_paths(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
Definition: planner.c:8291
#define EXPRKIND_LIMIT
Definition: planner.c:87
#define EXPRKIND_VALUES
Definition: planner.c:85
static bool can_partial_agg(PlannerInfo *root)
Definition: planner.c:7727
static double preprocess_limit(PlannerInfo *root, double tuple_fraction, int64 *offset_est, int64 *count_est)
Definition: planner.c:2552
Path * get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
Definition: planner.c:6557
Expr * preprocess_phv_expression(PlannerInfo *root, Expr *expr)
Definition: planner.c:1379
static List * get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys, List *path_pathkeys)
Definition: planner.c:5198
bool parallel_leader_participation
Definition: planner.c:70
static PathTarget * make_window_input_target(PlannerInfo *root, PathTarget *final_target, List *activeWindows)
Definition: planner.c:6133
static void apply_scanjoin_target_to_paths(PlannerInfo *root, RelOptInfo *rel, List *scanjoin_targets, List *scanjoin_targets_contain_srfs, bool scanjoin_target_parallel_safe, bool tlist_same_exprs)
Definition: planner.c:7769
static RelOptInfo * create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target)
Definition: planner.c:4765
static void optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5790
RowMarkType select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
Definition: planner.c:2486
PlannerInfo * subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root, bool hasRecursion, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:659
static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel, List *targets, List *targets_contain_srfs)
Definition: planner.c:6603
static void create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *final_distinct_rel, PathTarget *target)
Definition: planner.c:4835
#define EXPRKIND_QUAL
Definition: planner.c:81
static List * preprocess_groupclause(PlannerInfo *root, List *force)
Definition: planner.c:2803
static Node * preprocess_expression(PlannerInfo *root, Node *expr, int kind)
Definition: planner.c:1233
static Path * make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path, Path *cheapest_path, List *pathkeys, double limit_tuples)
Definition: planner.c:7585
static bool has_volatile_pathkey(List *keys)
Definition: planner.c:3159
static RelOptInfo * create_partial_grouping_paths(PlannerInfo *root, RelOptInfo *grouped_rel, RelOptInfo *input_rel, grouping_sets_data *gd, GroupPathExtraData *extra, bool force_rel_creation)
Definition: planner.c:7291
static void name_active_windows(List *activeWindows)
Definition: planner.c:6013
static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
Definition: planner.c:8560
static PathTarget * make_sort_input_target(PlannerInfo *root, PathTarget *final_target, bool *have_postponed_srfs)
Definition: planner.c:6381
static void create_one_window_path(PlannerInfo *root, RelOptInfo *window_rel, Path *path, PathTarget *input_target, PathTarget *output_target, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4595
bool enable_distinct_reordering
Definition: planner.c:71
void mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
Definition: planner.c:5718
static grouping_sets_data * preprocess_grouping_sets(PlannerInfo *root)
Definition: planner.c:2156
int debug_parallel_query
Definition: planner.c:69
static List * remap_to_groupclause_idx(List *groupClause, List *gsets, int *tleref_to_colnum_map)
Definition: planner.c:2337
static void adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
Definition: planner.c:3204
static PathTarget * make_group_input_target(PlannerInfo *root, PathTarget *final_target)
Definition: planner.c:5503
static List * reorder_grouping_sets(List *groupingSets, List *sortclause)
Definition: planner.c:3111
static int common_prefix_cmp(const void *a, const void *b)
Definition: planner.c:6064
static void grouping_planner(PlannerInfo *root, double tuple_fraction, SetOperationStmt *setops)
Definition: planner.c:1412
static RelOptInfo * make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, Node *havingQual)
Definition: planner.c:3868
static List * generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
Definition: planner.c:8235
static List * select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
Definition: planner.c:5930
Expr * expression_planner(Expr *expr)
Definition: planner.c:6719
static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel, List *sortPathkeys, List *groupClause, SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
Definition: planner.c:8685
bool limit_needed(Query *parse)
Definition: planner.c:2737
create_upper_paths_hook_type create_upper_paths_hook
Definition: planner.c:77
#define EXPRKIND_TABLEFUNC
Definition: planner.c:92
static void consider_groupingsets_paths(PlannerInfo *root, RelOptInfo *grouped_rel, Path *path, bool is_sorted, bool can_hash, grouping_sets_data *gd, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: planner.c:4146
static List * make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, List *tlist)
Definition: planner.c:6253
static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, double dNumGroups, GroupPathExtraData *extra)
Definition: planner.c:7054
static RelOptInfo * create_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, double limit_tuples)
Definition: planner.c:5283
#define EXPRKIND_RTFUNC
Definition: planner.c:83
static double get_number_of_groups(PlannerInfo *root, double path_rows, grouping_sets_data *gd, List *target_list)
Definition: planner.c:3633
static List * extract_rollup_sets(List *groupingSets)
Definition: planner.c:2899
static RelOptInfo * create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, bool target_parallel_safe, grouping_sets_data *gd)
Definition: planner.c:3755
static void create_partitionwise_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *grouped_rel, RelOptInfo *partially_grouped_rel, const AggClauseCosts *agg_costs, grouping_sets_data *gd, PartitionwiseAggregateType patype, GroupPathExtraData *extra)
Definition: planner.c:8004
#define EXPRKIND_ARBITER_ELEM
Definition: planner.c:91
static bool group_by_has_partkey(RelOptInfo *input_rel, List *targetList, List *groupClause)
Definition: planner.c:8148
PlannedStmt * standard_planner(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.c:309
static void standard_qp_callback(PlannerInfo *root, void *extra)
Definition: planner.c:3428
static RelOptInfo * create_window_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *input_target, PathTarget *output_target, bool output_target_parallel_safe, WindowFuncLists *wflists, List *activeWindows)
Definition: planner.c:4508
PlannedStmt *(* planner_hook_type)(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams)
Definition: planner.h:26
void(* create_upper_paths_hook_type)(PlannerInfo *root, UpperRelationKind stage, RelOptInfo *input_rel, RelOptInfo *output_rel, void *extra)
Definition: planner.h:33
@ PLAN_STMT_STANDARD
Definition: plannodes.h:41
RowMarkType
Definition: plannodes.h:1528
@ ROW_MARK_COPY
Definition: plannodes.h:1534
@ ROW_MARK_REFERENCE
Definition: plannodes.h:1533
@ ROW_MARK_SHARE
Definition: plannodes.h:1531
@ ROW_MARK_EXCLUSIVE
Definition: plannodes.h:1529
@ ROW_MARK_NOKEYEXCLUSIVE
Definition: plannodes.h:1530
@ ROW_MARK_KEYSHARE
Definition: plannodes.h:1532
#define snprintf
Definition: port.h:239
#define qsort(a, b, c, d)
Definition: port.h:479
#define printf(...)
Definition: port.h:245
static Datum Int64GetDatum(int64 X)
Definition: postgres.h:403
static int64 DatumGetInt64(Datum X)
Definition: postgres.h:393
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:332
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:322
#define InvalidOid
Definition: postgres_ext.h:37
unsigned int Oid
Definition: postgres_ext.h:32
void get_agg_clause_costs(PlannerInfo *root, AggSplit aggsplit, AggClauseCosts *costs)
Definition: prepagg.c:559
void preprocess_aggrefs(PlannerInfo *root, Node *clause)
Definition: prepagg.c:110
void preprocess_function_rtes(PlannerInfo *root)
void flatten_simple_union_all(PlannerInfo *root)
void transform_MERGE_to_join(Query *parse)
Definition: prepjointree.c:187
void remove_useless_result_rtes(PlannerInfo *root)
void pull_up_sublinks(PlannerInfo *root)
Definition: prepjointree.c:647
void replace_empty_jointree(Query *parse)
Definition: prepjointree.c:589
void pull_up_subqueries(PlannerInfo *root)
Relids get_relids_in_jointree(Node *jtnode, bool include_outer_joins, bool include_inner_joins)
Query * preprocess_relation_rtes(PlannerInfo *root)
Definition: prepjointree.c:417
void reduce_outer_joins(PlannerInfo *root)
Expr * canonicalize_qual(Expr *qual, bool is_check)
Definition: prepqual.c:293
char * c
e
Definition: preproc-init.c:82
void preprocess_targetlist(PlannerInfo *root)
Definition: preptlist.c:64
RelOptInfo * plan_set_operations(PlannerInfo *root)
Definition: prepunion.c:93
tree ctl root
Definition: radixtree.h:1857
static struct subre * parse(struct vars *v, int stopper, int type, struct state *init, struct state *final)
Definition: regcomp.c:717
List * RelationGetIndexPredicate(Relation relation)
Definition: relcache.c:5210
List * RelationGetIndexExpressions(Relation relation)
Definition: relcache.c:5097
RelOptInfo * find_base_rel(PlannerInfo *root, int relid)
Definition: relnode.c:416
void setup_simple_rel_arrays(PlannerInfo *root)
Definition: relnode.c:94
RelOptInfo * fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
Definition: relnode.c:1464
RelOptInfo * build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
Definition: relnode.c:192
Node * remove_nulling_relids(Node *node, const Bitmapset *removable_relids, const Bitmapset *except_relids)
@ ForwardScanDirection
Definition: sdir.h:28
double estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List **pgset, EstimationInfo *estinfo)
Definition: selfuncs.c:3456
double estimate_hashagg_tablesize(PlannerInfo *root, Path *path, const AggClauseCosts *agg_costs, double dNumGroups)
Definition: selfuncs.c:4194
Plan * set_plan_references(PlannerInfo *root, Plan *plan)
Definition: setrefs.c:288
bool extract_query_dependencies_walker(Node *node, PlannerInfo *context)
Definition: setrefs.c:3673
void check_stack_depth(void)
Definition: stack_depth.c:95
List * aggrefs
Definition: pathnodes.h:3511
List * aggdistinct
Definition: primnodes.h:493
List * args
Definition: primnodes.h:487
Expr * aggfilter
Definition: primnodes.h:496
List * aggorder
Definition: primnodes.h:490
GetForeignRowMarkType_function GetForeignRowMarkType
Definition: fdwapi.h:247
GetForeignUpperPaths_function GetForeignUpperPaths
Definition: fdwapi.h:226
Cardinality limit_tuples
Definition: pathnodes.h:3451
Node * quals
Definition: primnodes.h:2344
List * fromlist
Definition: primnodes.h:2343
int num_workers
Definition: plannodes.h:1332
bool invisible
Definition: plannodes.h:1338
bool single_copy
Definition: plannodes.h:1336
Plan plan
Definition: plannodes.h:1330
int rescan_param
Definition: plannodes.h:1334
PartitionwiseAggregateType patype
Definition: pathnodes.h:3435
AggClauseCosts agg_final_costs
Definition: pathnodes.h:3429
AggClauseCosts agg_partial_costs
Definition: pathnodes.h:3428
Cardinality numGroups
Definition: pathnodes.h:2414
Path path
Definition: pathnodes.h:1868
Definition: pg_list.h:54
Definition: nodes.h:135
List * exprs
Definition: pathnodes.h:1691
List * pathkeys
Definition: pathnodes.h:1824
Cardinality rows
Definition: pathnodes.h:1818
int disabled_nodes
Definition: pathnodes.h:1819
Cost total_cost
Definition: pathnodes.h:1821
LockClauseStrength strength
Definition: plannodes.h:1593
Index prti
Definition: plannodes.h:1585
RowMarkType markType
Definition: plannodes.h:1589
LockWaitPolicy waitPolicy
Definition: plannodes.h:1595
bool isParent
Definition: plannodes.h:1597
Index rowmarkId
Definition: plannodes.h:1587
int allMarkTypes
Definition: plannodes.h:1591
struct Plan * lefttree
Definition: plannodes.h:224
Cost total_cost
Definition: plannodes.h:190
struct Plan * righttree
Definition: plannodes.h:225
bool parallel_aware
Definition: plannodes.h:204
Cost startup_cost
Definition: plannodes.h:188
List * qual
Definition: plannodes.h:222
int plan_width
Definition: plannodes.h:198
bool parallel_safe
Definition: plannodes.h:206
Cardinality plan_rows
Definition: plannodes.h:196
List * targetlist
Definition: plannodes.h:220
List * initPlan
Definition: plannodes.h:227
struct Plan * planTree
Definition: plannodes.h:101
bool hasModifyingCTE
Definition: plannodes.h:83
List * appendRelations
Definition: plannodes.h:127
List * permInfos
Definition: plannodes.h:120
bool canSetTag
Definition: plannodes.h:86
List * rowMarks
Definition: plannodes.h:138
int64 planId
Definition: plannodes.h:74
int jitFlags
Definition: plannodes.h:98
Bitmapset * rewindPlanIDs
Definition: plannodes.h:135
int64 queryId
Definition: plannodes.h:71
ParseLoc stmt_len
Definition: plannodes.h:156
PlannedStmtOrigin planOrigin
Definition: plannodes.h:77
bool hasReturning
Definition: plannodes.h:80
ParseLoc stmt_location
Definition: plannodes.h:154
List * invalItems
Definition: plannodes.h:144
bool transientPlan
Definition: plannodes.h:89
List * resultRelations
Definition: plannodes.h:124
List * subplans
Definition: plannodes.h:132
List * relationOids
Definition: plannodes.h:141
bool dependsOnRole
Definition: plannodes.h:92
Bitmapset * unprunableRelids
Definition: plannodes.h:115
CmdType commandType
Definition: plannodes.h:68
Node * utilityStmt
Definition: plannodes.h:150
List * rtable
Definition: plannodes.h:109
List * partPruneInfos
Definition: plannodes.h:106
List * paramExecTypes
Definition: plannodes.h:147
bool parallelModeNeeded
Definition: plannodes.h:95
Bitmapset * prunableRelids
Definition: pathnodes.h:130
int lastPlanNodeId
Definition: pathnodes.h:163
char maxParallelHazard
Definition: pathnodes.h:178
List * subplans
Definition: pathnodes.h:105
bool dependsOnRole
Definition: pathnodes.h:169
Bitmapset * allRelids
Definition: pathnodes.h:123
List * appendRelations
Definition: pathnodes.h:142
List * finalrowmarks
Definition: pathnodes.h:136
List * invalItems
Definition: pathnodes.h:151
List * relationOids
Definition: pathnodes.h:148
List * paramExecTypes
Definition: pathnodes.h:154
bool parallelModeOK
Definition: pathnodes.h:172
bool transientPlan
Definition: pathnodes.h:166
Bitmapset * rewindPlanIDs
Definition: pathnodes.h:114
List * finalrteperminfos
Definition: pathnodes.h:133
List * subpaths
Definition: pathnodes.h:108
Index lastPHId
Definition: pathnodes.h:157
Index lastRowMarkId
Definition: pathnodes.h:160
List * resultRelations
Definition: pathnodes.h:139
List * partPruneInfos
Definition: pathnodes.h:145
List * finalrtable
Definition: pathnodes.h:117
bool parallelModeNeeded
Definition: pathnodes.h:175
Index query_level
Definition: pathnodes.h:226
Cost per_tuple
Definition: pathnodes.h:48
Cost startup
Definition: pathnodes.h:47
List * rtable
Definition: parsenodes.h:175
CmdType commandType
Definition: parsenodes.h:121
TableFunc * tablefunc
Definition: parsenodes.h:1213
struct TableSampleClause * tablesample
Definition: parsenodes.h:1127
Query * subquery
Definition: parsenodes.h:1133
List * values_lists
Definition: parsenodes.h:1219
JoinType jointype
Definition: parsenodes.h:1180
List * functions
Definition: parsenodes.h:1206
RTEKind rtekind
Definition: parsenodes.h:1076
List * ppilist
Definition: pathnodes.h:936
bool useridiscurrent
Definition: pathnodes.h:1000
Relids relids
Definition: pathnodes.h:908
struct PathTarget * reltarget
Definition: pathnodes.h:930
Index relid
Definition: pathnodes.h:954
List * unique_pathkeys
Definition: pathnodes.h:1019
Cardinality tuples
Definition: pathnodes.h:981
bool consider_parallel
Definition: pathnodes.h:924
BlockNumber pages
Definition: pathnodes.h:980
List * cheapest_parameterized_paths
Definition: pathnodes.h:940
List * pathlist
Definition: pathnodes.h:935
RelOptKind reloptkind
Definition: pathnodes.h:902
List * indexlist
Definition: pathnodes.h:976
struct Path * cheapest_startup_path
Definition: pathnodes.h:938
struct Path * cheapest_total_path
Definition: pathnodes.h:939
List * unique_groupclause
Definition: pathnodes.h:1021
Oid userid
Definition: pathnodes.h:998
Oid serverid
Definition: pathnodes.h:996
Bitmapset * live_parts
Definition: pathnodes.h:1081
int rel_parallel_workers
Definition: pathnodes.h:988
List * partial_pathlist
Definition: pathnodes.h:937
struct RelOptInfo * unique_rel
Definition: pathnodes.h:1017
Cardinality rows
Definition: pathnodes.h:914
Form_pg_class rd_rel
Definition: rel.h:111
Cardinality numGroups
Definition: pathnodes.h:2425
List * groupClause
Definition: pathnodes.h:2422
List * gsets_data
Definition: pathnodes.h:2424
bool hashable
Definition: pathnodes.h:2426
List * gsets
Definition: pathnodes.h:2423
bool is_hashed
Definition: pathnodes.h:2427
LockClauseStrength strength
Definition: parsenodes.h:1609
LockWaitPolicy waitPolicy
Definition: parsenodes.h:1610
Index tleSortGroupRef
Definition: parsenodes.h:1467
List * semi_rhs_exprs
Definition: pathnodes.h:3044
JoinType jointype
Definition: pathnodes.h:3033
Relids syn_righthand
Definition: pathnodes.h:3032
List * semi_operators
Definition: pathnodes.h:3043
Expr * expr
Definition: primnodes.h:2225
AttrNumber resno
Definition: primnodes.h:2227
Index ressortgroupref
Definition: primnodes.h:2231
Definition: primnodes.h:262
WindowClause * wc
Definition: planner.c:117
Node * startOffset
Definition: parsenodes.h:1576
List * partitionClause
Definition: parsenodes.h:1572
Node * endOffset
Definition: parsenodes.h:1577
List * orderClause
Definition: parsenodes.h:1574
List ** windowFuncs
Definition: clauses.h:23
Index maxWinRef
Definition: clauses.h:22
int numWindowFuncs
Definition: clauses.h:21
Index winref
Definition: primnodes.h:600
Oid winfnoid
Definition: primnodes.h:586
int * tleref_to_colnum_map
Definition: planner.c:108
Bitmapset * unhashable_refs
Definition: planner.c:106
List * unsortable_sets
Definition: planner.c:107
List * hash_sets_idx
Definition: planner.c:102
double dNumHashGroups
Definition: planner.c:103
Bitmapset * unsortable_refs
Definition: planner.c:105
Definition: type.h:96
List * activeWindows
Definition: planner.c:125
grouping_sets_data * gset_data
Definition: planner.c:126
SetOperationStmt * setop
Definition: planner.c:127
Definition: regguts.h:323
Node * SS_process_sublinks(PlannerInfo *root, Node *expr, bool isQual)
Definition: subselect.c:2060
void SS_process_ctes(PlannerInfo *root)
Definition: subselect.c:880
void SS_identify_outer_params(PlannerInfo *root)
Definition: subselect.c:2218
Node * SS_replace_correlation_vars(PlannerInfo *root, Node *expr)
Definition: subselect.c:2005
void SS_finalize_plan(PlannerInfo *root, Plan *plan)
Definition: subselect.c:2402
void SS_compute_initplan_cost(List *init_plans, Cost *initplan_cost_p, bool *unsafe_initplans_p)
Definition: subselect.c:2346
void SS_charge_for_initplans(PlannerInfo *root, RelOptInfo *final_rel)
Definition: subselect.c:2282
void table_close(Relation relation, LOCKMODE lockmode)
Definition: table.c:126
Relation table_open(Oid relationId, LOCKMODE lockmode)
Definition: table.c:40
TargetEntry * tlist_member(Expr *node, List *targetlist)
Definition: tlist.c:79
bool tlist_same_exprs(List *tlist1, List *tlist2)
Definition: tlist.c:218
SortGroupClause * get_sortgroupref_clause_noerr(Index sortref, List *clauses)
Definition: tlist.c:443
SortGroupClause * get_sortgroupref_clause(Index sortref, List *clauses)
Definition: tlist.c:422
bool grouping_is_sortable(List *groupClause)
Definition: tlist.c:540
List * make_tlist_from_pathtarget(PathTarget *target)
Definition: tlist.c:624
PathTarget * copy_pathtarget(PathTarget *src)
Definition: tlist.c:657
void add_new_columns_to_pathtarget(PathTarget *target, List *exprs)
Definition: tlist.c:752
PathTarget * create_empty_pathtarget(void)
Definition: tlist.c:681
List * get_sortgrouplist_exprs(List *sgClauses, List *targetList)
Definition: tlist.c:392
void split_pathtarget_at_srfs(PlannerInfo *root, PathTarget *target, PathTarget *input_target, List **targets, List **targets_contain_srfs)
Definition: tlist.c:881
bool grouping_is_hashable(List *groupClause)
Definition: tlist.c:560
void add_column_to_pathtarget(PathTarget *target, Expr *expr, Index sortgroupref)
Definition: tlist.c:695
#define create_pathtarget(root, tlist)
Definition: tlist.h:53
Node * flatten_group_exprs(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:968
Relids pull_varnos(PlannerInfo *root, Node *node)
Definition: var.c:114
List * pull_var_clause(Node *node, int flags)
Definition: var.c:653
Node * flatten_join_alias_vars(PlannerInfo *root, Query *query, Node *node)
Definition: var.c:789