author    Tom Lane <tgl@sss.pgh.pa.us>  2025-11-02 16:57:43 -0500
committer Tom Lane <tgl@sss.pgh.pa.us>  2025-11-02 16:57:43 -0500
commit    8f29467c57f44cc2cdd9e4e53c6ab1b07375d5b4
tree      fa4611b9c1c538f35e0842f0434cca5374d66456 /src/backend/executor
parent    1ea5bdb00bfbc6f8034859cd19769346bf31dc53
Change "long" numGroups fields to be Cardinality (i.e., double).
We've been nibbling away at removing uses of "long" for a long time, since its width is platform-dependent. Here's one more: change the remaining "long" fields in Plan nodes to Cardinality, since the three surviving examples all represent group-count estimates. The upstream planner code was converted to Cardinality some time ago; for example the corresponding fields in Path nodes are type Cardinality, as are the arguments of the make_foo_path functions.

Downstream in the executor, it turns out that these all feed to the table-size argument of BuildTupleHashTable. Change that to "double" as well, and fix it so that it safely clamps out-of-range values to the uint32 limit of simplehash.h, as was not being done before. Essentially, this is removing all the artificial datatype-dependent limitations on these values from upstream processing, and applying just one clamp at the moment where we're forced to do so by the datatype choices of simplehash.h.

Also, remove BuildTupleHashTable's misguided attempt to enforce work_mem/hash_mem_limit. It doesn't have enough information (particularly not the expected tuple width) to do that accurately, and it has no real business second-guessing the caller's choice. For all these plan types, it's really the planner's responsibility to not choose a hashed implementation if the hashtable is expected to exceed hash_mem_limit. The previous patch improved the accuracy of those estimates, and even if BuildTupleHashTable had more information it should arrive at the same conclusions.

Reported-by: Jeff Janes <jeff.janes@gmail.com>
Author: Tom Lane <tgl@sss.pgh.pa.us>
Reviewed-by: David Rowley <dgrowleyml@gmail.com>
Discussion: https://postgr.es/m/CAMkU=1zia0JfW_QR8L5xA2vpa0oqVuiapm78h=WpNsHH13_9uw@mail.gmail.com
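The clamp described above amounts to a few lines of defensive arithmetic at the simplehash.h boundary. As a rough standalone sketch of the pattern (illustrative only; the committed code uses PostgreSQL's PG_UINT32_MAX and appears in the execGrouping.c hunk below):

    #include <math.h>
    #include <stdint.h>

    /* Reduce a double-valued group-count estimate to the uint32 range
     * that simplehash.h can accept.  NaN and non-positive estimates
     * fall back to 1; anything at or beyond UINT32_MAX saturates. */
    static uint32_t
    clamp_estimate_to_uint32(double nelements)
    {
        if (isnan(nelements) || nelements <= 0)
            return 1;
        if (nelements >= (double) UINT32_MAX)
            return UINT32_MAX;
        return (uint32_t) nelements;
    }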
Diffstat (limited to 'src/backend/executor')
 src/backend/executor/execGrouping.c       | 36
 src/backend/executor/nodeAgg.c            | 30
 src/backend/executor/nodeRecursiveunion.c |  1
 src/backend/executor/nodeSetOp.c          |  1
 src/backend/executor/nodeSubplan.c        | 19
 5 files changed, 43 insertions(+), 44 deletions(-)
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index e1a3a813dd9..8b64a625ca5 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -14,6 +14,8 @@
*/
#include "postgres.h"
+#include <math.h>
+
#include "access/htup_details.h"
#include "access/parallel.h"
#include "common/hashfn.h"
@@ -144,7 +146,7 @@ execTuplesHashPrepare(int numCols,
* eqfuncoids: OIDs of equality comparison functions to use
* hashfunctions: FmgrInfos of datatype-specific hashing functions to use
* collations: collations to use in comparisons
- * nbuckets: initial estimate of hashtable size
+ * nelements: initial estimate of hashtable size
* additionalsize: size of data that may be stored along with the hash entry
* metacxt: memory context for long-lived data and the simplehash table
* tuplescxt: memory context in which to store the hashed tuples themselves
@@ -187,7 +189,7 @@ BuildTupleHashTable(PlanState *parent,
const Oid *eqfuncoids,
FmgrInfo *hashfunctions,
Oid *collations,
- long nbuckets,
+ double nelements,
Size additionalsize,
MemoryContext metacxt,
MemoryContext tuplescxt,
@@ -195,12 +197,24 @@ BuildTupleHashTable(PlanState *parent,
bool use_variable_hash_iv)
{
TupleHashTable hashtable;
- Size entrysize;
- Size hash_mem_limit;
+ uint32 nbuckets;
MemoryContext oldcontext;
uint32 hash_iv = 0;
- Assert(nbuckets > 0);
+ /*
+ * tuplehash_create requires a uint32 element count, so we had better
+ * clamp the given nelements to fit in that. As long as we have to do
+ * that, we might as well protect against completely insane input like
+ * zero or NaN. But it is not our job here to enforce issues like staying
+ * within hash_mem: the caller should have done that, and we don't have
+ * enough info to second-guess.
+ */
+ if (isnan(nelements) || nelements <= 0)
+ nbuckets = 1;
+ else if (nelements >= PG_UINT32_MAX)
+ nbuckets = PG_UINT32_MAX;
+ else
+ nbuckets = (uint32) nelements;
/* tuplescxt must be separate, else ResetTupleHashTable breaks things */
Assert(metacxt != tuplescxt);
@@ -208,18 +222,6 @@ BuildTupleHashTable(PlanState *parent,
/* ensure additionalsize is maxalign'ed */
additionalsize = MAXALIGN(additionalsize);
- /*
- * Limit initial table size request to not more than hash_mem.
- *
- * XXX this calculation seems pretty misguided, as it counts only overhead
- * and not the tuples themselves. But we have no knowledge of the
- * expected tuple width here.
- */
- entrysize = sizeof(TupleHashEntryData) + additionalsize;
- hash_mem_limit = get_hash_memory_limit() / entrysize;
- if (nbuckets > hash_mem_limit)
- nbuckets = hash_mem_limit;
-
oldcontext = MemoryContextSwitchTo(metacxt);
hashtable = (TupleHashTable) palloc(sizeof(TupleHashTableData));
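As a worked illustration of why the deleted limit could not really enforce hash_mem (all numbers below are invented; they are not PostgreSQL's actual struct sizes):

    #include <stdio.h>

    int
    main(void)
    {
        size_t hash_mem   = 64UL * 1024 * 1024; /* hypothetical 64MB budget  */
        size_t entrysize  = 24;                 /* per-entry overhead only   */
        size_t tuplewidth = 200;                /* average stored tuple size */

        /* The removed code capped nbuckets at hash_mem / entrysize ... */
        size_t old_cap = hash_mem / entrysize;

        /* ... but that many entries plus their tuples blow far past 64MB. */
        printf("cap = %zu entries, real usage ~ %zu MB\n",
               old_cap, old_cap * (entrysize + tuplewidth) / (1024 * 1024));
        return 0;
    }

With roughly 2.8 million allowed entries at about 224 bytes apiece, actual usage lands near 600MB: counting only per-entry overhead is exactly the blind spot the commit message calls misguided.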
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 759ffeed2e6..0b02fd32107 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -402,12 +402,12 @@ static void find_cols(AggState *aggstate, Bitmapset **aggregated,
Bitmapset **unaggregated);
static bool find_cols_walker(Node *node, FindColsContext *context);
static void build_hash_tables(AggState *aggstate);
-static void build_hash_table(AggState *aggstate, int setno, long nbuckets);
+static void build_hash_table(AggState *aggstate, int setno, double nbuckets);
static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
bool nullcheck);
static void hash_create_memory(AggState *aggstate);
-static long hash_choose_num_buckets(double hashentrysize,
- long ngroups, Size memory);
+static double hash_choose_num_buckets(double hashentrysize,
+ double ngroups, Size memory);
static int hash_choose_num_partitions(double input_groups,
double hashentrysize,
int used_bits,
@@ -1469,7 +1469,7 @@ build_hash_tables(AggState *aggstate)
for (setno = 0; setno < aggstate->num_hashes; ++setno)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- long nbuckets;
+ double nbuckets;
Size memory;
if (perhash->hashtable != NULL)
@@ -1478,8 +1478,6 @@ build_hash_tables(AggState *aggstate)
continue;
}
- Assert(perhash->aggnode->numGroups > 0);
-
memory = aggstate->hash_mem_limit / aggstate->num_hashes;
/* choose reasonable number of buckets per hashtable */
@@ -1505,7 +1503,7 @@ build_hash_tables(AggState *aggstate)
* Build a single hashtable for this grouping set.
*/
static void
-build_hash_table(AggState *aggstate, int setno, long nbuckets)
+build_hash_table(AggState *aggstate, int setno, double nbuckets)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
MemoryContext metacxt = aggstate->hash_metacxt;
@@ -2053,11 +2051,11 @@ hash_create_memory(AggState *aggstate)
/*
* Choose a reasonable number of buckets for the initial hash table size.
*/
-static long
-hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
+static double
+hash_choose_num_buckets(double hashentrysize, double ngroups, Size memory)
{
- long max_nbuckets;
- long nbuckets = ngroups;
+ double max_nbuckets;
+ double nbuckets = ngroups;
max_nbuckets = memory / hashentrysize;
@@ -2065,12 +2063,16 @@ hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
* Underestimating is better than overestimating. Too many buckets crowd
* out space for group keys and transition state values.
*/
- max_nbuckets >>= 1;
+ max_nbuckets /= 2;
if (nbuckets > max_nbuckets)
nbuckets = max_nbuckets;
- return Max(nbuckets, 1);
+ /*
+ * BuildTupleHashTable will clamp any obviously-insane result, so we don't
+ * need to be too careful here.
+ */
+ return nbuckets;
}
/*
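In isolation, the revised heuristic just halves the memory-derived ceiling and takes the minimum, all in double arithmetic now that ngroups is a Cardinality. A hypothetical standalone rendering, with invented inputs:

    #include <stdio.h>

    /* Same shape as hash_choose_num_buckets above: cap the bucket count
     * at half of what the memory budget could hold, since too many
     * buckets crowd out group keys and transition state values. */
    static double
    choose_num_buckets(double hashentrysize, double ngroups, size_t memory)
    {
        double max_nbuckets = (double) memory / hashentrysize / 2;

        return (ngroups > max_nbuckets) ? max_nbuckets : ngroups;
    }

    int
    main(void)
    {
        /* A 4MB budget with 64-byte entries caps us at 32768 buckets even
         * though a million groups are expected; any remaining insanity is
         * clamped by BuildTupleHashTable, so no Max(nbuckets, 1) here. */
        printf("%.0f\n", choose_num_buckets(64.0, 1e6, 4UL * 1024 * 1024));
        return 0;
    }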
@@ -3686,7 +3688,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
if (use_hashing)
{
Plan *outerplan = outerPlan(node);
- uint64 totalGroups = 0;
+ double totalGroups = 0;
aggstate->hash_spill_rslot = ExecInitExtraTupleSlot(estate, scanDesc,
&TTSOpsMinimalTuple);
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index ebb7919b49b..cd0ad51dcd2 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -35,7 +35,6 @@ build_hash_table(RecursiveUnionState *rustate)
TupleDesc desc = ExecGetResultType(outerPlanState(rustate));
Assert(node->numCols > 0);
- Assert(node->numGroups > 0);
/*
* If both child plans deliver the same fixed tuple slot type, we can tell
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 5aabed18a09..9e0f9274fb1 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -88,7 +88,6 @@ build_hash_table(SetOpState *setopstate)
TupleDesc desc = ExecGetResultType(outerPlanState(setopstate));
Assert(node->strategy == SETOP_HASHED);
- Assert(node->numGroups > 0);
/*
* If both child plans deliver the same fixed tuple slot type, we can tell
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 1cd0988bb49..c8b7bd9eb66 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -34,7 +34,6 @@
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
-#include "optimizer/optimizer.h"
#include "utils/array.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
@@ -481,7 +480,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
int ncols = node->numCols;
ExprContext *innerecontext = node->innerecontext;
MemoryContext oldcontext;
- long nbuckets;
+ double nentries;
TupleTableSlot *slot;
Assert(subplan->subLinkType == ANY_SUBLINK);
@@ -509,9 +508,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
node->havehashrows = false;
node->havenullrows = false;
- nbuckets = clamp_cardinality_to_long(planstate->plan->plan_rows);
- if (nbuckets < 1)
- nbuckets = 1;
+ nentries = planstate->plan->plan_rows;
if (node->hashtable)
ResetTupleHashTable(node->hashtable);
@@ -524,7 +521,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
node->tab_eq_funcoids,
node->tab_hash_funcs,
node->tab_collations,
- nbuckets,
+ nentries,
0, /* no additional data */
node->planstate->state->es_query_cxt,
node->tuplesContext,
@@ -534,12 +531,12 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
if (!subplan->unknownEqFalse)
{
if (ncols == 1)
- nbuckets = 1; /* there can only be one entry */
+ nentries = 1; /* there can only be one entry */
else
{
- nbuckets /= 16;
- if (nbuckets < 1)
- nbuckets = 1;
+ nentries /= 16;
+ if (nentries < 1)
+ nentries = 1;
}
if (node->hashnulls)
@@ -553,7 +550,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
node->tab_eq_funcoids,
node->tab_hash_funcs,
node->tab_collations,
- nbuckets,
+ nentries,
0, /* no additional data */
node->planstate->state->es_query_cxt,
node->tuplesContext,
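Taken together, the nodeSubplan.c sizing now flows straight from plan_rows with no long-typed clamp in between. A hedged restatement of the estimate used for the second (null-keys) table, with an invented helper name:

    /* Estimate entries for the hashnulls table.  With one key column,
     * all null-keyed rows collapse into a single entry; with several
     * columns, assume rows containing a null key are relatively rare.
     * The divide-by-16 rule is the pre-existing heuristic, and any
     * out-of-range result is clamped later by BuildTupleHashTable. */
    static double
    null_table_entries(int ncols, double plan_rows)
    {
        if (ncols == 1)
            return 1;
        return (plan_rows / 16 < 1) ? 1 : plan_rows / 16;
    }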