Diffstat (limited to 'src/backend/executor')
 src/backend/executor/execGrouping.c  | 205
 src/backend/executor/execQual.c      |  33
 src/backend/executor/nodeAgg.c       |  11
 src/backend/executor/nodeIndexscan.c | 178
 src/backend/executor/nodeSubplan.c   |   6
 5 files changed, 310 insertions(+), 123 deletions(-)
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 100e7a1c375..0307386c9e0 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.7 2003/08/08 21:41:34 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.7.2.1 2003/09/07 04:36:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,6 +23,13 @@
#include "utils/syscache.h"
+static TupleHashTable CurTupleHashTable = NULL;
+
+static uint32 TupleHashTableHash(const void *key, Size keysize);
+static int TupleHashTableMatch(const void *key1, const void *key2,
+ Size keysize);
+
+
/*****************************************************************************
* Utility routines for grouping tuples together
*****************************************************************************/
@@ -272,7 +279,7 @@ execTuplesHashPrepare(TupleDesc tupdesc,
* numCols, keyColIdx: identify the tuple fields to use as lookup key
* eqfunctions: equality comparison functions to use
* hashfunctions: datatype-specific hashing functions to use
- * nbuckets: number of buckets to make
+ * nbuckets: initial estimate of hashtable size
* entrysize: size of each entry (at least sizeof(TupleHashEntryData))
* tablecxt: memory context in which to store table and table entries
* tempcxt: short-lived context for evaluating hash and comparison functions
@@ -290,14 +297,13 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
MemoryContext tablecxt, MemoryContext tempcxt)
{
TupleHashTable hashtable;
- Size tabsize;
+ HASHCTL hash_ctl;
Assert(nbuckets > 0);
Assert(entrysize >= sizeof(TupleHashEntryData));
- tabsize = sizeof(TupleHashTableData) +
- (nbuckets - 1) *sizeof(TupleHashEntry);
- hashtable = (TupleHashTable) MemoryContextAllocZero(tablecxt, tabsize);
+ hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt,
+ sizeof(TupleHashTableData));
hashtable->numCols = numCols;
hashtable->keyColIdx = keyColIdx;
@@ -306,7 +312,20 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
hashtable->tablecxt = tablecxt;
hashtable->tempcxt = tempcxt;
hashtable->entrysize = entrysize;
- hashtable->nbuckets = nbuckets;
+
+ MemSet(&hash_ctl, 0, sizeof(hash_ctl));
+ hash_ctl.keysize = sizeof(TupleHashEntryData);
+ hash_ctl.entrysize = entrysize;
+ hash_ctl.hash = TupleHashTableHash;
+ hash_ctl.match = TupleHashTableMatch;
+ hash_ctl.hcxt = tablecxt;
+ hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets,
+ &hash_ctl,
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+ if (hashtable->hashtab == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
return hashtable;
}
@@ -327,19 +346,93 @@ TupleHashEntry
LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew)
{
- int numCols = hashtable->numCols;
- AttrNumber *keyColIdx = hashtable->keyColIdx;
HeapTuple tuple = slot->val;
TupleDesc tupdesc = slot->ttc_tupleDescriptor;
- uint32 hashkey = 0;
- int i;
- int bucketno;
TupleHashEntry entry;
MemoryContext oldContext;
+ TupleHashTable saveCurHT;
+ bool found;
- /* Need to run the hash function in short-lived context */
+ /* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);
+ /*
+ * Set up data needed by hash and match functions
+ *
+ * We save and restore CurTupleHashTable just in case someone manages
+ * to invoke this code re-entrantly.
+ */
+ hashtable->tupdesc = tupdesc;
+ saveCurHT = CurTupleHashTable;
+ CurTupleHashTable = hashtable;
+
+ /* Search the hash table */
+ entry = (TupleHashEntry) hash_search(hashtable->hashtab,
+ &tuple,
+ isnew ? HASH_ENTER : HASH_FIND,
+ &found);
+
+ if (isnew)
+ {
+ if (found)
+ {
+ /* found pre-existing entry */
+ *isnew = false;
+ }
+ else
+ {
+ /* created new entry ... we hope */
+ if (entry == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+
+ /*
+ * Zero any caller-requested space in the entry. (This zaps
+ * the "key data" dynahash.c copied into the new entry, but
+ * we don't care since we're about to overwrite it anyway.)
+ */
+ MemSet(entry, 0, hashtable->entrysize);
+
+ /* Copy the first tuple into the table context */
+ MemoryContextSwitchTo(hashtable->tablecxt);
+ entry->firstTuple = heap_copytuple(tuple);
+
+ *isnew = true;
+ }
+ }
+
+ CurTupleHashTable = saveCurHT;
+
+ MemoryContextSwitchTo(oldContext);
+
+ return entry;
+}
+
+/*
+ * Compute the hash value for a tuple
+ *
+ * The passed-in key is a pointer to a HeapTuple pointer -- this is either
+ * the firstTuple field of a TupleHashEntry struct, or the key value passed
+ * to hash_search. We ignore the keysize.
+ *
+ * CurTupleHashTable must be set before calling this, since dynahash.c
+ * doesn't provide any API that would let us get at the hashtable otherwise.
+ *
+ * Also, the caller must select an appropriate memory context for running
+ * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.)
+ */
+static uint32
+TupleHashTableHash(const void *key, Size keysize)
+{
+ HeapTuple tuple = *(const HeapTuple *) key;
+ TupleHashTable hashtable = CurTupleHashTable;
+ int numCols = hashtable->numCols;
+ AttrNumber *keyColIdx = hashtable->keyColIdx;
+ TupleDesc tupdesc = hashtable->tupdesc;
+ uint32 hashkey = 0;
+ int i;
+
for (i = 0; i < numCols; i++)
{
AttrNumber att = keyColIdx[i];
@@ -360,72 +453,36 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
hashkey ^= hkey;
}
}
- bucketno = hashkey % (uint32) hashtable->nbuckets;
-
- for (entry = hashtable->buckets[bucketno];
- entry != NULL;
- entry = entry->next)
- {
- /* Quick check using hashkey */
- if (entry->hashkey != hashkey)
- continue;
- if (execTuplesMatch(entry->firstTuple,
- tuple,
- tupdesc,
- numCols, keyColIdx,
- hashtable->eqfunctions,
- hashtable->tempcxt))
- {
- if (isnew)
- *isnew = false;
- MemoryContextSwitchTo(oldContext);
- return entry;
- }
- }
-
- /* Not there, so build a new one if requested */
- if (isnew)
- {
- MemoryContextSwitchTo(hashtable->tablecxt);
-
- entry = (TupleHashEntry) palloc0(hashtable->entrysize);
-
- entry->hashkey = hashkey;
- entry->firstTuple = heap_copytuple(tuple);
-
- entry->next = hashtable->buckets[bucketno];
- hashtable->buckets[bucketno] = entry;
-
- *isnew = true;
- }
-
- MemoryContextSwitchTo(oldContext);
- return entry;
+ return hashkey;
}
/*
- * Walk through all the entries of a hash table, in no special order.
- * Returns NULL when no more entries remain.
+ * See whether two tuples (presumably of the same hash value) match
+ *
+ * As above, the passed pointers are pointers to HeapTuple pointers.
*
- * Iterator state must be initialized with ResetTupleHashIterator() macro.
+ * CurTupleHashTable must be set before calling this, since dynahash.c
+ * doesn't provide any API that would let us get at the hashtable otherwise.
+ *
+ * Also, the caller must select an appropriate memory context for running
+ * the compare functions. (dynahash.c doesn't change CurrentMemoryContext.)
*/
-TupleHashEntry
-ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator *state)
+static int
+TupleHashTableMatch(const void *key1, const void *key2, Size keysize)
{
- TupleHashEntry entry;
-
- entry = state->next_entry;
- while (entry == NULL)
- {
- if (state->next_bucket >= hashtable->nbuckets)
- {
- /* No more entries in hashtable, so done */
- return NULL;
- }
- entry = hashtable->buckets[state->next_bucket++];
- }
- state->next_entry = entry->next;
-
- return entry;
+ HeapTuple tuple1 = *(const HeapTuple *) key1;
+ HeapTuple tuple2 = *(const HeapTuple *) key2;
+ TupleHashTable hashtable = CurTupleHashTable;
+
+ if (execTuplesMatch(tuple1,
+ tuple2,
+ hashtable->tupdesc,
+ hashtable->numCols,
+ hashtable->keyColIdx,
+ hashtable->eqfunctions,
+ hashtable->tempcxt))
+ return 0;
+ else
+ return 1;
}
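
One detail worth spelling out from the execGrouping.c conversion above: dynahash's hash and match callbacks have fixed signatures with no room for per-table state, which is why the patch smuggles the table through the static CurTupleHashTable, saving and restoring it around each lookup in case of re-entrant use. A minimal, self-contained sketch of that pattern, assuming nothing beyond the C standard library (Table, CurTable, table_hash, and table_lookup are illustrative names, not PostgreSQL's):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef struct Table
{
	int			numCols;		/* per-table state the callbacks need */
} Table;

/* The callbacks cannot take a Table argument, so the current table
 * is smuggled through a static pointer instead. */
static Table *CurTable = NULL;

static uint32_t
table_hash(const void *key, size_t keysize)
{
	Table	   *tab = CurTable;	/* must be set before we are called */

	(void) keysize;				/* like the patch, ignore keysize */
	return (uint32_t) (*(const int *) key * tab->numCols);
}

/* Each entry point brackets the callback-triggering work with a
 * save/restore, so re-entrant invocation stays correct. */
static uint32_t
table_lookup(Table *tab, const int *key)
{
	Table	   *saveCur = CurTable;
	uint32_t	h;

	CurTable = tab;
	h = table_hash(key, sizeof(*key));	/* hash_search() would do this */
	CurTable = saveCur;
	return h;
}

int
main(void)
{
	Table		t = {3};
	int			key = 42;

	printf("%u\n", table_lookup(&t, &key));
	return 0;
}

The save/restore makes the pattern safe if table_lookup is reached again from inside a callback, which is exactly the hazard the patch's comment about re-entrancy guards against.
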
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 4ecae71cd68..cce62237085 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.141 2003/08/08 21:41:39 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.141.2.1 2003/09/07 04:36:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1620,16 +1620,18 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
ArrayType *result;
List *element;
Oid element_type = arrayExpr->element_typeid;
- int ndims = arrayExpr->ndims;
+ int ndims = 0;
int dims[MAXDIM];
int lbs[MAXDIM];
- if (ndims == 1)
+ if (!arrayExpr->multidims)
{
+ /* Elements are presumably of scalar type */
int nelems;
Datum *dvalues;
int i = 0;
+ ndims = 1;
nelems = length(astate->elements);
/* Shouldn't happen here, but if length is 0, return NULL */
@@ -1667,6 +1669,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
}
else
{
+ /* Must be nested array expressions */
char *dat = NULL;
Size ndatabytes = 0;
int nbytes;
@@ -1677,12 +1680,6 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
bool firstone = true;
int i;
- if (ndims <= 0 || ndims > MAXDIM)
- ereport(ERROR,
- (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions exceeds the maximum allowed, %d",
- MAXDIM)));
-
/* loop through and get data area from each element */
foreach(element, astate->elements)
{
@@ -1701,14 +1698,32 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
array = DatumGetArrayTypeP(arraydatum);
+ /* run-time double-check on element type */
+ if (element_type != ARR_ELEMTYPE(array))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("cannot merge incompatible arrays"),
+ errdetail("Array with element type %s cannot be "
+ "included in ARRAY construct with element type %s.",
+ format_type_be(ARR_ELEMTYPE(array)),
+ format_type_be(element_type))));
+
if (firstone)
{
/* Get sub-array details from first member */
elem_ndims = ARR_NDIM(array);
+ ndims = elem_ndims + 1;
+ if (ndims <= 0 || ndims > MAXDIM)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of array dimensions exceeds " \
+ "the maximum allowed, %d", MAXDIM)));
+
elem_dims = (int *) palloc(elem_ndims * sizeof(int));
memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int));
elem_lbs = (int *) palloc(elem_ndims * sizeof(int));
memcpy(elem_lbs, ARR_LBOUND(array), elem_ndims * sizeof(int));
+
firstone = false;
}
else
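
The substance of this execQual.c change: an ARRAY[] expression's dimensionality is no longer taken on faith from the parse-time ndims field; it is derived at run time from the first sub-array (ndims = elem_ndims + 1), each sub-array's element type is double-checked as the data is merged, and the else branch this hunk's context ends on presumably checks later sub-arrays against the first. A minimal sketch of the dimension rule, assuming MAXDIM is 6 as in PostgreSQL's array.h (result_ndims and the demo values are illustrative):

#include <stdio.h>

#define MAXDIM 6				/* PostgreSQL's array dimension limit */

/* The result wraps one extra dimension around its sub-arrays, and
 * the total must stay within MAXDIM. */
static int
result_ndims(int elem_ndims)
{
	int			ndims = elem_ndims + 1;

	return (ndims > 0 && ndims <= MAXDIM) ? ndims : -1;
}

int
main(void)
{
	/* ARRAY[ARRAY[1,2], ARRAY[3,4]]: 1-D sub-arrays give a 2-D result */
	printf("%d\n", result_ndims(1));	/* 2 */
	printf("%d\n", result_ndims(6));	/* -1: would exceed MAXDIM */
	return 0;
}
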
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index d8fb9a9565d..1ac046b65f4 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -45,7 +45,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.115 2003/08/08 21:41:41 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.115.2.1 2003/09/07 04:36:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -905,7 +905,7 @@ agg_fill_hash_table(AggState *aggstate)
aggstate->table_filled = true;
/* Initialize to walk the hash table */
- ResetTupleHashIterator(&aggstate->hashiter);
+ ResetTupleHashIterator(aggstate->hashtable, &aggstate->hashiter);
}
/*
@@ -920,7 +920,6 @@ agg_retrieve_hash_table(AggState *aggstate)
bool *aggnulls;
AggStatePerAgg peragg;
AggStatePerGroup pergroup;
- TupleHashTable hashtable;
AggHashEntry entry;
TupleTableSlot *firstSlot;
TupleTableSlot *resultSlot;
@@ -935,7 +934,6 @@ agg_retrieve_hash_table(AggState *aggstate)
aggnulls = econtext->ecxt_aggnulls;
projInfo = aggstate->ss.ps.ps_ProjInfo;
peragg = aggstate->peragg;
- hashtable = aggstate->hashtable;
firstSlot = aggstate->ss.ss_ScanTupleSlot;
/*
@@ -950,8 +948,7 @@ agg_retrieve_hash_table(AggState *aggstate)
/*
* Find the next entry in the hash table
*/
- entry = (AggHashEntry) ScanTupleHashTable(hashtable,
- &aggstate->hashiter);
+ entry = (AggHashEntry) ScanTupleHashTable(&aggstate->hashiter);
if (entry == NULL)
{
/* No more entries in hashtable, so done */
@@ -1440,7 +1437,7 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
{
- ResetTupleHashIterator(&node->hashiter);
+ ResetTupleHashIterator(node->hashtable, &node->hashiter);
return;
}
}
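
These nodeAgg.c call-site changes follow from reworking TupleHashIterator on top of dynahash's sequential-scan API, so resetting the iterator now needs the table itself while scanning no longer does. The execnodes.h side of the change is not shown in this excerpt; the following is a reconstruction under the assumption that TupleHashIterator was redefined as dynahash's HASH_SEQ_STATUS, so treat the exact definitions as unverified from this diff alone:

/* Reconstruction of the presumed header change, not verbatim: */
typedef HASH_SEQ_STATUS TupleHashIterator;

#define ResetTupleHashIterator(htable, iter) \
	hash_seq_init(iter, (htable)->hashtab)
#define ScanTupleHashTable(iter) \
	((TupleHashEntry) hash_seq_search(iter))
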
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index bfbcaf208de..1ea753ea303 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.82 2003/08/04 02:39:59 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.82.2.1 2003/09/07 04:36:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,19 +28,51 @@
#include "access/heapam.h"
#include "executor/execdebug.h"
#include "executor/nodeIndexscan.h"
+#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "parser/parsetree.h"
-/* ----------------
- * Misc stuff to move to executor.h soon -cim 6/5/90
- * ----------------
- */
+
#define NO_OP 0
#define LEFT_OP 1
#define RIGHT_OP 2
+/*
+ * In a multiple-index plan, we must take care to return any given tuple
+ * only once, even if it matches conditions of several index scans. Our
+ * preferred way to do this is to record already-returned tuples in a hash
+ * table (using the TID as unique identifier). However, in a very large
+ * scan this could conceivably run out of memory. We limit the hash table
+ * to no more than SortMem KB; if it grows past that, we fall back to the
+ * pre-7.4 technique: evaluate the prior-scan index quals again for each
+ * tuple (which is space-efficient, but slow).
+ *
+ * When scanning backwards, we use scannum to determine when to emit the
+ * tuple --- we have to re-emit a tuple in the same scan as it was first
+ * encountered.
+ *
+ * Note: this code would break if the planner were ever to create a multiple
+ * index plan with overall backwards direction, because the hashtable code
+ * will emit a tuple the first time it is encountered (which would be the
+ * highest scan in which it matches the index), but the evaluate-the-quals
+ * code will emit a tuple in the lowest-numbered scan in which it's valid.
+ * This could be fixed at need by making the evaluate-the-quals case more
+ * complex. Currently the planner will never create such a plan (since it
+ * considers multi-index plans unordered anyway), so there's no need for
+ * more complexity.
+ */
+typedef struct
+{
+ /* tid is the hash key and so must be first! */
+ ItemPointerData tid; /* TID of a tuple we've returned */
+ int scannum; /* number of scan we returned it in */
+} DupHashTabEntry;
+
+
static TupleTableSlot *IndexNext(IndexScanState *node);
+static void create_duphash(IndexScanState *node);
+
/* ----------------------------------------------------------------
* IndexNext
@@ -163,7 +195,7 @@ IndexNext(IndexScanState *node)
while ((tuple = index_getnext(scandesc, direction)) != NULL)
{
/*
- * store the scanned tuple in the scan tuple slot of the scan
+ * Store the scanned tuple in the scan tuple slot of the scan
* state. Note: we pass 'false' because tuples returned by
* amgetnext are pointers onto disk pages and must not be
* pfree()'d.
@@ -174,36 +206,80 @@ IndexNext(IndexScanState *node)
false); /* don't pfree */
/*
- * We must check to see if the current tuple was already
- * matched by an earlier index, so we don't double-report it.
- * We do this by passing the tuple through ExecQual and
- * checking for failure with all previous qualifications.
+ * If it's a multiple-index scan, make sure not to double-report
+ * a tuple matched by more than one index. (See notes above.)
*/
- if (node->iss_IndexPtr > 0)
+ if (numIndices > 1)
{
- bool prev_matches = false;
- int prev_index;
- List *qual;
-
- econtext->ecxt_scantuple = slot;
- ResetExprContext(econtext);
- qual = node->indxqualorig;
- for (prev_index = 0;
- prev_index < node->iss_IndexPtr;
- prev_index++)
+ /* First try the hash table */
+ if (node->iss_DupHash)
{
- if (ExecQual((List *) lfirst(qual), econtext, false))
+ DupHashTabEntry *entry;
+ bool found;
+
+ entry = (DupHashTabEntry *)
+ hash_search(node->iss_DupHash,
+ &tuple->t_data->t_ctid,
+ HASH_ENTER,
+ &found);
+ if (entry == NULL ||
+ node->iss_DupHash->hctl->nentries > node->iss_MaxHash)
+ {
+ /* out of memory (either hard or soft limit) */
+ /* release hash table and fall thru to old code */
+ hash_destroy(node->iss_DupHash);
+ node->iss_DupHash = NULL;
+ }
+ else if (found)
{
- prev_matches = true;
- break;
+ /* pre-existing entry */
+
+ /*
+ * It's a duplicate if first emitted in a different
+ * scan. If same scan, we must be backing up, so
+ * okay to emit again.
+ */
+ if (entry->scannum != node->iss_IndexPtr)
+ {
+ /* Dup, so drop it and loop back for another */
+ ExecClearTuple(slot);
+ continue;
+ }
+ }
+ else
+ {
+ /* new entry, finish filling it in */
+ entry->scannum = node->iss_IndexPtr;
}
- qual = lnext(qual);
}
- if (prev_matches)
+ /* If hash table has overflowed, do it the hard way */
+ if (node->iss_DupHash == NULL &&
+ node->iss_IndexPtr > 0)
{
- /* Duplicate, so drop it and loop back for another */
- ExecClearTuple(slot);
- continue;
+ bool prev_matches = false;
+ int prev_index;
+ List *qual;
+
+ econtext->ecxt_scantuple = slot;
+ ResetExprContext(econtext);
+ qual = node->indxqualorig;
+ for (prev_index = 0;
+ prev_index < node->iss_IndexPtr;
+ prev_index++)
+ {
+ if (ExecQual((List *) lfirst(qual), econtext, false))
+ {
+ prev_matches = true;
+ break;
+ }
+ qual = lnext(qual);
+ }
+ if (prev_matches)
+ {
+ /* Dup, so drop it and loop back for another */
+ ExecClearTuple(slot);
+ continue;
+ }
}
}
@@ -383,6 +459,14 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
return;
}
+ /* reset hash table */
+ if (numIndices > 1)
+ {
+ if (node->iss_DupHash)
+ hash_destroy(node->iss_DupHash);
+ create_duphash(node);
+ }
+
/* reset index scans */
if (ScanDirectionIsBackward(((IndexScan *) node->ss.ps.plan)->indxorderdir))
node->iss_IndexPtr = numIndices;
@@ -432,6 +516,10 @@ ExecEndIndexScan(IndexScanState *node)
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
ExecClearTuple(node->ss.ss_ScanTupleSlot);
+ /* drop hash table */
+ if (node->iss_DupHash)
+ hash_destroy(node->iss_DupHash);
+
/*
* close the index relations
*/
@@ -507,7 +595,7 @@ ExecIndexRestrPos(IndexScanState *node)
/* ----------------------------------------------------------------
* ExecInitIndexScan
- *
+ *
* Initializes the index scan's state information, creates
* scan keys, and opens the base and index relations.
*
@@ -920,11 +1008,41 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
ExecAssignScanProjectionInfo(&indexstate->ss);
/*
+ * Initialize hash table if needed.
+ */
+ if (numIndices > 1)
+ create_duphash(indexstate);
+ else
+ indexstate->iss_DupHash = NULL;
+
+ /*
* all done.
*/
return indexstate;
}
+static void
+create_duphash(IndexScanState *node)
+{
+ HASHCTL hash_ctl;
+
+ MemSet(&hash_ctl, 0, sizeof(hash_ctl));
+ hash_ctl.keysize = SizeOfIptrData;
+ hash_ctl.entrysize = sizeof(DupHashTabEntry);
+ hash_ctl.hash = tag_hash;
+ hash_ctl.hcxt = CurrentMemoryContext;
+ node->iss_DupHash = hash_create("DupHashTable",
+ (long) ceil(node->ss.ps.plan->plan_rows),
+ &hash_ctl,
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+ if (node->iss_DupHash == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+ node->iss_MaxHash = (SortMem * 1024L) /
+ (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry)));
+}
+
int
ExecCountSlotsIndexScan(IndexScan *node)
{
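
A closing note on create_duphash's sizing rule: iss_MaxHash converts SortMem (a limit expressed in kilobytes) into a maximum entry count by dividing by the per-entry footprint, dynahash's HASHELEMENT header plus the DupHashTabEntry payload, each rounded up by MAXALIGN. When nentries passes that cap, IndexNext above destroys the table and falls back to re-evaluating the prior scans' quals. A worked example under assumed sizes (the 16-byte figures are illustrative; real values depend on platform alignment):

#include <stdio.h>

int
main(void)
{
	long		sort_mem_kb = 1024;	/* e.g. SortMem = 1024, i.e. 1 MB */
	long		elem_hdr = 16;	/* MAXALIGN(sizeof(HASHELEMENT)), assumed */
	long		entry = 16;		/* MAXALIGN(sizeof(DupHashTabEntry)), assumed */
	long		max_hash = (sort_mem_kb * 1024L) / (elem_hdr + entry);

	/* 1 MB / 32 bytes per remembered TID => 32768 entries */
	printf("cap: %ld entries\n", max_hash);
	return 0;
}
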
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 7530be263f3..71209fe246d 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.54 2003/08/08 21:41:42 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.54.2.1 2003/09/07 04:36:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -627,8 +627,8 @@ findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot)
TupleHashIterator hashiter;
TupleHashEntry entry;
- ResetTupleHashIterator(&hashiter);
- while ((entry = ScanTupleHashTable(hashtable, &hashiter)) != NULL)
+ ResetTupleHashIterator(hashtable, &hashiter);
+ while ((entry = ScanTupleHashTable(&hashiter)) != NULL)
{
if (!execTuplesUnequal(entry->firstTuple,
tuple,