Diffstat (limited to 'src/backend/optimizer/prep/prepunion.c')
 src/backend/optimizer/prep/prepunion.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 6588f83d5ec..2ebd4ea3320 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -1018,6 +1018,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 					const char *construct)
 {
 	int			numGroupCols = list_length(groupClauses);
+	int			hash_mem = get_hash_mem();
 	bool		can_sort;
 	bool		can_hash;
 	Size		hashentrysize;
@@ -1049,15 +1050,17 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 
 	/*
 	 * Don't do it if it doesn't look like the hashtable will fit into
-	 * work_mem.
+	 * hash_mem.
 	 */
 	hashentrysize = MAXALIGN(input_path->pathtarget->width) + MAXALIGN(SizeofMinimalTupleHeader);
 
-	if (hashentrysize * dNumGroups > work_mem * 1024L)
+	if (hashentrysize * dNumGroups > hash_mem * 1024L)
 		return false;
 
 	/*
-	 * See if the estimated cost is no more than doing it the other way.
+	 * See if the estimated cost is no more than doing it the other way.  We
+	 * deliberately give the hash case more memory when hash_mem exceeds
+	 * standard work mem (i.e. when hash_mem_multiplier exceeds 1.0).
 	 *
 	 * We need to consider input_plan + hashagg versus input_plan + sort +
 	 * group.  Note that the actual result plan might involve a SetOp or
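
For context on the get_hash_mem() call introduced above: the helper (added elsewhere in this change) scales work_mem by the new hash_mem_multiplier GUC and clamps the result. The standalone C sketch below illustrates that derivation under stated assumptions; the parameterized signature, the MAX_KILOBYTES definition, and the example values are modeled on the surrounding PostgreSQL sources rather than copied verbatim.

#include <limits.h>
#include <stdio.h>

/* Assumption: mirrors the guc.h cap that keeps work_mem-derived byte
 * counts (kilobytes * 1024) representable in a long. */
#define MAX_KILOBYTES	(INT_MAX / 1024)

/*
 * Sketch of the hash_mem derivation: work_mem (in kB) scaled by
 * hash_mem_multiplier, clamped to MAX_KILOBYTES.  The real helper takes
 * no arguments and reads the GUC globals directly; parameters are used
 * here only to keep the sketch self-contained.
 */
static int
get_hash_mem_sketch(int work_mem, double hash_mem_multiplier)
{
	double		hash_mem = (double) work_mem * hash_mem_multiplier;

	if (hash_mem < (double) MAX_KILOBYTES)
		return (int) hash_mem;
	return MAX_KILOBYTES;
}

int
main(void)
{
	/* With work_mem = 4096 kB and hash_mem_multiplier = 2.0, the
	 * memory-fit test in choose_hashed_setop() accepts hash tables up
	 * to twice the size it would allow under plain work_mem. */
	printf("hash_mem = %d kB\n", get_hash_mem_sketch(4096, 2.0));
	return 0;
}

With hash_mem_multiplier at its default of 1.0, hash_mem equals work_mem and the rewritten test behaves exactly as before; only a multiplier above 1.0 changes which set operations choose_hashed_setop() is willing to hash.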