Diffstat (limited to 'src/backend')
-rw-r--r--   src/backend/executor/execGrouping.c      59
-rw-r--r--   src/backend/executor/nodeSetOp.c          9
-rw-r--r--   src/backend/executor/nodeSubplan.c       53
-rw-r--r--   src/backend/optimizer/plan/subselect.c   45
-rw-r--r--   src/backend/optimizer/util/pathnode.c    12
5 files changed, 149 insertions, 29 deletions
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index b4bdaa3c305..e1a3a813dd9 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -14,6 +14,7 @@
  */
 #include "postgres.h"
 
+#include "access/htup_details.h"
 #include "access/parallel.h"
 #include "common/hashfn.h"
 #include "executor/executor.h"
@@ -303,6 +304,64 @@ ResetTupleHashTable(TupleHashTable hashtable)
 }
 
 /*
+ * Estimate the amount of space needed for a TupleHashTable with nentries
+ * entries, if the tuples have average data width tupleWidth and the caller
+ * requires additionalsize extra space per entry.
+ *
+ * Return SIZE_MAX if it'd overflow size_t.
+ *
+ * nentries is "double" because this is meant for use by the planner,
+ * which typically works with double rowcount estimates.  So we'd need to
+ * clamp to integer somewhere and that might as well be here.  We do expect
+ * the value not to be NaN or negative, else the result will be garbage.
+ */
+Size
+EstimateTupleHashTableSpace(double nentries,
+                            Size tupleWidth,
+                            Size additionalsize)
+{
+    Size        sh_space;
+    double      tuples_space;
+
+    /* First estimate the space needed for the simplehash table */
+    sh_space = tuplehash_estimate_space(nentries);
+
+    /* Give up if that's already too big */
+    if (sh_space >= SIZE_MAX)
+        return sh_space;
+
+    /*
+     * Compute space needed for hashed tuples with additional data.  nentries
+     * must be somewhat sane, so it should be safe to compute this product.
+     *
+     * We assume that the hashed tuples will be kept in a BumpContext so that
+     * there is not additional per-tuple overhead.
+     *
+     * (Note that this is only accurate if MEMORY_CONTEXT_CHECKING is off,
+     * else bump.c will add a MemoryChunk header to each tuple.  However, it
+     * seems undesirable for debug builds to make different planning choices
+     * than production builds, so we assume the production behavior always.)
+     */
+    tuples_space = nentries * (MAXALIGN(SizeofMinimalTupleHeader) +
+                               MAXALIGN(tupleWidth) +
+                               MAXALIGN(additionalsize));
+
+    /*
+     * Check for size_t overflow.  This coding is trickier than it may appear,
+     * because on 64-bit machines SIZE_MAX cannot be represented exactly as a
+     * double.  We must cast it explicitly to suppress compiler warnings about
+     * an inexact conversion, and we must trust that any double value that
+     * compares strictly less than "(double) SIZE_MAX" will cast to a
+     * representable size_t value.
+     */
+    if (sh_space + tuples_space >= (double) SIZE_MAX)
+        return SIZE_MAX;
+
+    /* We don't bother estimating size of the miscellaneous overhead data */
+    return (Size) (sh_space + tuples_space);
+}
+
+/*
  * Find or create a hashtable entry for the tuple group containing the
  * given tuple.  The tuple must be the same type as the hashtable entries.
  *
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 7b223a7ca3a..5aabed18a09 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -111,6 +111,15 @@ build_hash_table(SetOpState *setopstate)
                            false);
 }
 
+/* Planner support routine to estimate space needed for hash table */
+Size
+EstimateSetOpHashTableSpace(double nentries, Size tupleWidth)
+{
+    return EstimateTupleHashTableSpace(nentries,
+                                       tupleWidth,
+                                       sizeof(SetOpStatePerGroupData));
+}
+
 /*
  * We've completed processing a tuple group.  Decide how many copies (if any)
  * of its representative row to emit, and store the count into numOutput.
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 9f6e45bcb0b..1cd0988bb49 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -525,7 +525,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
                                           node->tab_hash_funcs,
                                           node->tab_collations,
                                           nbuckets,
-                                          0,
+                                          0,    /* no additional data */
                                           node->planstate->state->es_query_cxt,
                                           node->tuplesContext,
                                           innerecontext->ecxt_per_tuple_memory,
@@ -554,7 +554,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
                                            node->tab_hash_funcs,
                                            node->tab_collations,
                                            nbuckets,
-                                           0,
+                                           0,    /* no additional data */
                                            node->planstate->state->es_query_cxt,
                                            node->tuplesContext,
                                            innerecontext->ecxt_per_tuple_memory,
@@ -636,6 +636,55 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
     MemoryContextSwitchTo(oldcontext);
 }
 
+/* Planner support routine to estimate space needed for hash table(s) */
+Size
+EstimateSubplanHashTableSpace(double nentries,
+                              Size tupleWidth,
+                              bool unknownEqFalse)
+{
+    Size        tab1space,
+                tab2space;
+
+    /* Estimate size of main hashtable */
+    tab1space = EstimateTupleHashTableSpace(nentries,
+                                            tupleWidth,
+                                            0 /* no additional data */ );
+
+    /* Give up if that's already too big */
+    if (tab1space >= SIZE_MAX)
+        return tab1space;
+
+    /* Done if we don't need a hashnulls table */
+    if (unknownEqFalse)
+        return tab1space;
+
+    /*
+     * Adjust the rowcount estimate in the same way that buildSubPlanHash
+     * will, except that we don't bother with the special case for a single
+     * hash column.  (We skip that detail because it'd be notationally painful
+     * for our caller to provide the column count, and this table has
+     * relatively little impact on the total estimate anyway.)
+     */
+    nentries /= 16;
+    if (nentries < 1)
+        nentries = 1;
+
+    /*
+     * It might be sane to also reduce the tupleWidth, but on the other hand
+     * we are not accounting for the space taken by the tuples' null bitmaps.
+     * Leave it alone for now.
+     */
+    tab2space = EstimateTupleHashTableSpace(nentries,
+                                            tupleWidth,
+                                            0 /* no additional data */ );
+
+    /* Guard against overflow */
+    if (tab2space >= SIZE_MAX - tab1space)
+        return SIZE_MAX;
+
+    return tab1space + tab2space;
+}
+
 /*
  * execTuplesUnequal
  *        Return true if two tuples are definitely unequal in the indicated
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 14192a13236..ff63d20f8d5 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -20,6 +20,7 @@
 #include "catalog/pg_operator.h"
 #include "catalog/pg_type.h"
 #include "executor/executor.h"
+#include "executor/nodeSubplan.h"
 #include "miscadmin.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
@@ -79,8 +80,8 @@ static Node *convert_testexpr(PlannerInfo *root,
                               List *subst_nodes);
 static Node *convert_testexpr_mutator(Node *node,
                                       convert_testexpr_context *context);
-static bool subplan_is_hashable(Plan *plan);
-static bool subpath_is_hashable(Path *path);
+static bool subplan_is_hashable(Plan *plan, bool unknownEqFalse);
+static bool subpath_is_hashable(Path *path, bool unknownEqFalse);
 static bool testexpr_is_hashable(Node *testexpr, List *param_ids);
 static bool test_opexpr_is_hashable(OpExpr *testexpr, List *param_ids);
 static bool hash_ok_operator(OpExpr *expr);
@@ -283,7 +284,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery,
         best_path = final_rel->cheapest_total_path;
 
         /* Now we can check if it'll fit in hash_mem */
-        if (subpath_is_hashable(best_path))
+        if (subpath_is_hashable(best_path, true))
         {
             SubPlan    *hashplan;
             AlternativeSubPlan *asplan;
@@ -524,7 +525,7 @@ build_subplan(PlannerInfo *root, Plan *plan, Path *path,
      */
     if (subLinkType == ANY_SUBLINK &&
         splan->parParam == NIL &&
-        subplan_is_hashable(plan) &&
+        subplan_is_hashable(plan, unknownEqFalse) &&
         testexpr_is_hashable(splan->testexpr, splan->paramIds))
         splan->useHashTable = true;
 
@@ -711,19 +712,19 @@ convert_testexpr_mutator(Node *node,
  * is suitable for hashing.  We only look at the subquery itself.
  */
 static bool
-subplan_is_hashable(Plan *plan)
+subplan_is_hashable(Plan *plan, bool unknownEqFalse)
 {
-    double      subquery_size;
+    Size        hashtablesize;
 
     /*
-     * The estimated size of the subquery result must fit in hash_mem. (Note:
-     * we use heap tuple overhead here even though the tuples will actually be
-     * stored as MinimalTuples; this provides some fudge factor for hashtable
-     * overhead.)
+     * The estimated size of the hashtable holding the subquery result must
+     * fit in hash_mem.  (Note: reject on equality, to ensure that an estimate
+     * of SIZE_MAX disables hashing regardless of the hash_mem limit.)
      */
-    subquery_size = plan->plan_rows *
-        (MAXALIGN(plan->plan_width) + MAXALIGN(SizeofHeapTupleHeader));
-    if (subquery_size > get_hash_memory_limit())
+    hashtablesize = EstimateSubplanHashTableSpace(plan->plan_rows,
+                                                  plan->plan_width,
+                                                  unknownEqFalse);
+    if (hashtablesize >= get_hash_memory_limit())
         return false;
 
     return true;
@@ -735,19 +736,19 @@ subplan_is_hashable(Plan *plan)
  * Identical to subplan_is_hashable, but work from a Path for the subplan.
  */
 static bool
-subpath_is_hashable(Path *path)
+subpath_is_hashable(Path *path, bool unknownEqFalse)
 {
-    double      subquery_size;
+    Size        hashtablesize;
 
     /*
-     * The estimated size of the subquery result must fit in hash_mem. (Note:
-     * we use heap tuple overhead here even though the tuples will actually be
-     * stored as MinimalTuples; this provides some fudge factor for hashtable
-     * overhead.)
+     * The estimated size of the hashtable holding the subquery result must
+     * fit in hash_mem.  (Note: reject on equality, to ensure that an estimate
+     * of SIZE_MAX disables hashing regardless of the hash_mem limit.)
     */
-    subquery_size = path->rows *
-        (MAXALIGN(path->pathtarget->width) + MAXALIGN(SizeofHeapTupleHeader));
-    if (subquery_size > get_hash_memory_limit())
+    hashtablesize = EstimateSubplanHashTableSpace(path->rows,
+                                                  path->pathtarget->width,
+                                                  unknownEqFalse);
+    if (hashtablesize >= get_hash_memory_limit())
         return false;
 
     return true;
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 44ac5312edd..e4fd6950fad 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -17,6 +17,7 @@
 #include <math.h>
 
 #include "access/htup_details.h"
+#include "executor/nodeSetOp.h"
 #include "foreign/fdwapi.h"
 #include "miscadmin.h"
 #include "nodes/extensible.h"
@@ -3461,7 +3462,7 @@ create_setop_path(PlannerInfo *root,
     }
     else
     {
-        Size        hashentrysize;
+        Size        hashtablesize;
 
         /*
          * In hashed mode, we must read all the input before we can emit
@@ -3490,11 +3491,12 @@ create_setop_path(PlannerInfo *root,
 
         /*
          * Also disable if it doesn't look like the hashtable will fit into
-         * hash_mem.
+         * hash_mem.  (Note: reject on equality, to ensure that an estimate of
+         * SIZE_MAX disables hashing regardless of the hash_mem limit.)
          */
-        hashentrysize = MAXALIGN(leftpath->pathtarget->width) +
-            MAXALIGN(SizeofMinimalTupleHeader);
-        if (hashentrysize * numGroups > get_hash_memory_limit())
+        hashtablesize = EstimateSetOpHashTableSpace(numGroups,
+                                                    leftpath->pathtarget->width);
+        if (hashtablesize >= get_hash_memory_limit())
             pathnode->path.disabled_nodes++;
     }
     pathnode->path.rows = outputRows;
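The double-to-size_t overflow guard in EstimateTupleHashTableSpace is subtle enough to be worth seeing in isolation. The standalone sketch below shows the same clamping pattern outside the backend; clamp_double_to_size and its constants are hypothetical placeholders for illustration only, not part of this commit or of PostgreSQL.

/* Standalone sketch of the patch's double-to-size_t overflow guard.
 * clamp_double_to_size is a hypothetical helper, not PostgreSQL code.
 */
#include <stdint.h>
#include <stdio.h>

static size_t
clamp_double_to_size(size_t fixed_part, double variable_part)
{
    /*
     * On 64-bit machines SIZE_MAX has no exact double representation, so the
     * comparison is done in double with SIZE_MAX cast explicitly; any value
     * strictly below (double) SIZE_MAX converts back to size_t safely.
     */
    if ((double) fixed_part + variable_part >= (double) SIZE_MAX)
        return SIZE_MAX;
    return fixed_part + (size_t) variable_part;
}

int
main(void)
{
    double      nentries = 1e6;             /* placeholder rowcount estimate */
    size_t      per_row = 64;               /* placeholder per-entry space */
    size_t      table_space = 1024 * 1024;  /* placeholder hash-table size */

    /* Mimic the planner's math: fixed table space plus rows * per-row space */
    printf("estimate: %zu bytes\n",
           clamp_double_to_size(table_space, nentries * (double) per_row));
    /* A huge product clamps to SIZE_MAX, which callers treat as "too big" */
    printf("overflow: %zu\n",
           clamp_double_to_size(table_space, 1e30));
    return 0;
}

The second call shows why the planner-side callers reject on equality with SIZE_MAX: a clamped estimate must disable hashing regardless of the hash_mem limit.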
