author     Tom Lane <tgl@sss.pgh.pa.us>    2017-09-01 17:38:54 -0400
committer  Tom Lane <tgl@sss.pgh.pa.us>    2017-09-01 17:39:01 -0400
commit     51daa7bdb39e1bdc31eb99fd3f54f61743ebb7ae (patch)
tree       a527c43c5129b7f154b32326337e062202c04010 /src/include/executor/execParallel.h
parent     c039ba0716383ccaf88c9be1a7f0803a77823de1 (diff)
Improve division of labor between execParallel.c and nodeGather[Merge].c.
Move the responsibility for creating/destroying TupleQueueReaders into execParallel.c, to avoid duplicative coding in nodeGather.c and nodeGatherMerge.c. Also, instead of having DestroyTupleQueueReader do shm_mq_detach, do it in the caller (which is now only ExecParallelFinish). This means execParallel.c does both the attaching and detaching of the tuple-queue-reader shm_mqs, which seems less weird than the previous arrangement.

These changes also eliminate a vestigial memory leak (of the pei->tqueue array). It's now demonstrable that rescans of Gather or GatherMerge don't leak memory.

Discussion: https://postgr.es/m/8670.1504192177@sss.pgh.pa.us
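To make the new division of labor concrete, here is a minimal sketch (not the committed code) of what a helper like ExecParallelCreateReaders could look like: after the workers have been launched, it binds each tuple queue to its worker and wraps it in a TupleQueueReader. The shm_mq_set_handle call and the tupDesc argument to CreateTupleQueueReader are assumptions inferred from the prototype in the diff below.

/*
 * Sketch only: build one TupleQueueReader per launched worker.
 * Assumes shm_mq_set_handle() and a CreateTupleQueueReader() that
 * takes the queue handle plus a tuple descriptor.
 */
void
ExecParallelCreateReaders(ParallelExecutorInfo *pei, TupleDesc tupDesc)
{
	int			nworkers = pei->pcxt->nworkers_launched;
	int			i;

	Assert(pei->reader == NULL);

	if (nworkers > 0)
	{
		pei->reader = (struct TupleQueueReader **)
			palloc(nworkers * sizeof(struct TupleQueueReader *));

		for (i = 0; i < nworkers; i++)
		{
			/* let the queue know which worker it talks to ... */
			shm_mq_set_handle(pei->tqueue[i],
							  pei->pcxt->worker[i].bgworker_handle);
			/* ... and wrap it in a reader for the Gather node to drain */
			pei->reader[i] = CreateTupleQueueReader(pei->tqueue[i], tupDesc);
		}
	}
}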
Diffstat (limited to 'src/include/executor/execParallel.h')
-rw-r--r--  src/include/executor/execParallel.h | 18
1 file changed, 11 insertions, 7 deletions
diff --git a/src/include/executor/execParallel.h b/src/include/executor/execParallel.h
index 1cb895d8984..ed231f2d53f 100644
--- a/src/include/executor/execParallel.h
+++ b/src/include/executor/execParallel.h
@@ -23,17 +23,21 @@ typedef struct SharedExecutorInstrumentation SharedExecutorInstrumentation;
 
 typedef struct ParallelExecutorInfo
 {
-	PlanState  *planstate;
-	ParallelContext *pcxt;
-	BufferUsage *buffer_usage;
-	SharedExecutorInstrumentation *instrumentation;
-	shm_mq_handle **tqueue;
-	dsa_area   *area;
-	bool		finished;
+	PlanState  *planstate;		/* plan subtree we're running in parallel */
+	ParallelContext *pcxt;		/* parallel context we're using */
+	BufferUsage *buffer_usage;	/* points to bufusage area in DSM */
+	SharedExecutorInstrumentation *instrumentation; /* optional */
+	dsa_area   *area;			/* points to DSA area in DSM */
+	bool		finished;		/* set true by ExecParallelFinish */
+	/* These two arrays have pcxt->nworkers_launched entries: */
+	shm_mq_handle **tqueue;		/* tuple queues for worker output */
+	struct TupleQueueReader **reader;	/* tuple reader/writer support */
 } ParallelExecutorInfo;
 
 extern ParallelExecutorInfo *ExecInitParallelPlan(PlanState *planstate,
 				  EState *estate, int nworkers, int64 tuples_needed);
+extern void ExecParallelCreateReaders(ParallelExecutorInfo *pei,
+					  TupleDesc tupDesc);
 extern void ExecParallelFinish(ParallelExecutorInfo *pei);
 extern void ExecParallelCleanup(ParallelExecutorInfo *pei);
 extern void ExecParallelReinitialize(PlanState *planstate,
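For orientation, here is a hedged sketch of the caller-side lifecycle after this commit, as a Gather-like node might drive it; the surrounding variable names (node, estate, nworkers, tuples_needed, tupDesc) are placeholders, not code from the patch:

/* Startup: set up the parallel plan, launch workers, create readers. */
pei = ExecInitParallelPlan(outerPlanState(node), estate,
						   nworkers, tuples_needed);
LaunchParallelWorkers(pei->pcxt);
if (pei->pcxt->nworkers_launched > 0)
	ExecParallelCreateReaders(pei, tupDesc);

/* ... drain tuples through pei->reader[0..nworkers_launched-1] ... */

/*
 * Shutdown: ExecParallelFinish now both destroys the readers and
 * detaches their shm_mqs, so the node no longer does either itself;
 * ExecParallelCleanup then releases the parallel context and DSA.
 */
ExecParallelFinish(pei);
ExecParallelCleanup(pei);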