summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTom Lane <tgl@sss.pgh.pa.us>2009-10-30 20:59:16 +0000
committerTom Lane <tgl@sss.pgh.pa.us>2009-10-30 20:59:16 +0000
commit73b4c2b853df82db09d81cb4050b9a80dc326028 (patch)
treeb30087465e68d2e0091df4471d3736ee471153e6
parentb9d803bc82954dd7278f28b7470a6d3943f80ef9 (diff)
Make the overflow guards in ExecChooseHashTableSize be more protective.
The original coding ensured nbuckets and nbatch didn't exceed INT_MAX, which while not insane on its own terms did nothing to protect subsequent code like "palloc(nbatch * sizeof(BufFile *))". Since enormous join size estimates might well be planner error rather than reality, it seems best to constrain the initial sizes to be not more than work_mem/sizeof(pointer), thus ensuring the allocated arrays don't exceed work_mem. We will allow nbatch to get bigger than that during subsequent ExecHashIncreaseNumBatches calls, but we should still guard against integer overflow in those palloc requests. Per bug #5145 from Bernt Marius Johnsen. Although the given test case only seems to fail back to 8.2, previous releases have variants of this issue, so patch all supported branches.
-rw-r--r--src/backend/executor/nodeHash.c8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 73cc440bb67..990b288bbc2 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.88 2004/12/31 21:59:45 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.88.4.1 2009/10/30 20:59:16 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -380,6 +380,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
nbuckets = (int) (hash_table_bytes / (bucketsize * FUDGE_FAC));
if (nbuckets <= 0)
nbuckets = 1;
+ /* Ensure we can allocate an array of nbuckets pointers */
+ nbuckets = Min(nbuckets, MaxAllocSize / sizeof(void *));
if (totalbuckets <= nbuckets)
{
@@ -404,10 +406,10 @@ ExecChooseHashTableSize(double ntuples, int tupwidth,
*/
dtmp = ceil((inner_rel_bytes - hash_table_bytes) /
hash_table_bytes);
- if (dtmp < INT_MAX)
+ if (dtmp < MaxAllocSize / sizeof(void *))
nbatch = (int) dtmp;
else
- nbatch = INT_MAX;
+ nbatch = MaxAllocSize / sizeof(void *);
if (nbatch <= 0)
nbatch = 1;
}