path: root/src/backend/executor/nodeHashjoin.c
author     Tom Lane <tgl@sss.pgh.pa.us>  2023-05-19 17:24:48 -0400
committer  Tom Lane <tgl@sss.pgh.pa.us>  2023-05-19 17:24:48 -0400
commit     0245f8db36f375326c2bae0c3420d3c77714e72d (patch)
tree       7ce91f23658a05ea24be4703fb06cdc6b56248f7 /src/backend/executor/nodeHashjoin.c
parent     df6b19fbbc20d830de91d9bea68715a39635b568 (diff)
Pre-beta mechanical code beautification.
Run pgindent, pgperltidy, and reformat-dat-files.

This set of diffs is a bit larger than typical. We've updated to pg_bsd_indent 2.1.2, which properly indents variable declarations that have multi-line initialization expressions (the continuation lines are now indented one tab stop). We've also updated to perltidy version 20230309 and changed some of its settings, which reduces its desire to add whitespace to lines to make assignments etc. line up. Going forward, that should make for fewer random-seeming changes to existing code.

Discussion: https://postgr.es/m/20230428092545.qfb3y5wcu4cm75ur@alvherre.pgsql
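To make the continuation-line rule above concrete, here is a small compilable C sketch. The types and names are invented for illustration and are not from the PostgreSQL tree; only the layout rule itself comes from the commit message. A declaration whose initialization expression wraps onto a second line used to have that line sit at the declaration's own indent; pg_bsd_indent 2.1.2 now pushes it in by one additional tab stop.

    /* Illustrative types only; not from the PostgreSQL sources. */
    typedef struct ExampleBatch
    {
        int         barrier;
    } ExampleBatch;

    typedef struct ExampleTable
    {
        ExampleBatch batches[8];
    } ExampleTable;

    static int *
    example_batch_barrier(ExampleTable *table, int batchno)
    {
        /*
         * Older pg_bsd_indent kept the continuation line at the same indent
         * as the declaration:
         *
         *     int        *barrier =
         *     &table->batches[batchno].barrier;
         *
         * pg_bsd_indent 2.1.2 indents it one extra tab stop, as below.
         */
        int        *barrier =
            &table->batches[batchno].barrier;

        return barrier;
    }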
Diffstat (limited to 'src/backend/executor/nodeHashjoin.c')
-rw-r--r--   src/backend/executor/nodeHashjoin.c   28
1 file changed, 14 insertions, 14 deletions
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index e40436db38e..980746128bc 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -1216,7 +1216,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
         {
             SharedTuplestoreAccessor *inner_tuples;
             Barrier *batch_barrier =
-            &hashtable->batches[batchno].shared->batch_barrier;
+                &hashtable->batches[batchno].shared->batch_barrier;

             switch (BarrierAttach(batch_barrier))
             {
@@ -1330,22 +1330,22 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
     BufFile *file = *fileptr;

     /*
-     * The batch file is lazily created. If this is the first tuple
-     * written to this batch, the batch file is created and its buffer is
-     * allocated in the spillCxt context, NOT in the batchCxt.
+     * The batch file is lazily created. If this is the first tuple written to
+     * this batch, the batch file is created and its buffer is allocated in
+     * the spillCxt context, NOT in the batchCxt.
     *
-     * During the build phase, buffered files are created for inner
-     * batches. Each batch's buffered file is closed (and its buffer freed)
-     * after the batch is loaded into memory during the outer side scan.
-     * Therefore, it is necessary to allocate the batch file buffer in a
-     * memory context which outlives the batch itself.
+     * During the build phase, buffered files are created for inner batches.
+     * Each batch's buffered file is closed (and its buffer freed) after the
+     * batch is loaded into memory during the outer side scan. Therefore, it
+     * is necessary to allocate the batch file buffer in a memory context
+     * which outlives the batch itself.
     *
-     * Also, we use spillCxt instead of hashCxt for a better accounting of
-     * the spilling memory consumption.
+     * Also, we use spillCxt instead of hashCxt for a better accounting of the
+     * spilling memory consumption.
     */
     if (file == NULL)
     {
-        MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
+        MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);

         file = BufFileCreateTemp(false);
         *fileptr = file;
@@ -1622,7 +1622,7 @@ ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt)
 {
     int plan_node_id = state->js.ps.plan->plan_node_id;
     ParallelHashJoinState *pstate =
-    shm_toc_lookup(pcxt->toc, plan_node_id, false);
+        shm_toc_lookup(pcxt->toc, plan_node_id, false);

     /*
      * It would be possible to reuse the shared hash table in single-batch
@@ -1657,7 +1657,7 @@ ExecHashJoinInitializeWorker(HashJoinState *state,
     HashState *hashNode;
     int plan_node_id = state->js.ps.plan->plan_node_id;
     ParallelHashJoinState *pstate =
-    shm_toc_lookup(pwcxt->toc, plan_node_id, false);
+        shm_toc_lookup(pwcxt->toc, plan_node_id, false);

     /* Attach to the space for shared temporary files. */
     SharedFileSetAttach(&pstate->fileset, pwcxt->seg);
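The comment reflowed in the ExecHashJoinSaveTuple hunk above describes why a batch file's buffer must be allocated in spillCxt rather than a per-batch context. The following is a minimal sketch of that pattern, not code from this commit: the function name, the longlived_cxt parameter, and the write arguments are invented for illustration, while MemoryContextSwitchTo and BufFileCreateTemp are the real PostgreSQL routines visible in the hunk and BufFileWrite is the real buffile.c write routine.

    #include "postgres.h"

    #include "storage/buffile.h"
    #include "utils/memutils.h"

    /*
     * Sketch of the lazy batch-file pattern: "longlived_cxt" stands in for
     * hashtable->spillCxt, which outlives any per-batch memory context.
     */
    static void
    save_tuple_sketch(BufFile **fileptr, MemoryContext longlived_cxt,
                      void *tuple, size_t len)
    {
        BufFile *file = *fileptr;

        if (file == NULL)
        {
            /*
             * First tuple routed to this batch: create the temp file while
             * the long-lived context is current, so the file's buffer
             * survives resets of the per-batch context.
             */
            MemoryContext oldctx = MemoryContextSwitchTo(longlived_cxt);

            file = BufFileCreateTemp(false);
            *fileptr = file;

            MemoryContextSwitchTo(oldctx);
        }

        BufFileWrite(file, tuple, len);
    }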