author     Alvaro Herrera <alvherre@alvh.no-ip.org>  2020-05-12 16:01:52 -0400
committer  Alvaro Herrera <alvherre@alvh.no-ip.org>  2020-05-12 16:07:30 -0400
commit     3e9744465dbe51822c7d76baca1f934d54ba9452 (patch)
tree       08526408599e7a83c122462413f793c0531576f4 /src/backend/executor
parent     6a918c3ac8a6b1d8b53cead6fcb7cbd84eee5750 (diff)
Add -Wimplicit-fallthrough to CFLAGS and CXXFLAGS
Use it at level 4, a bit more restrictive than the default level, and tweak our commanding comments to FALLTHROUGH.

(However, leave zic.c alone, since it's external code; to avoid the warnings that would appear there, change CFLAGS for that file in the Makefile.)

Author: Julien Rouhaud <rjuju123@gmail.com>
Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/20200412081825.qyo5vwwco3fv4gdo@nol
Discussion: https://postgr.es/m/flat/E1fDenm-0000C8-IJ@gemulon.postgresql.org
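Why the strict spelling matters: GCC's -Wimplicit-fallthrough=4 recognizes only a narrow set of marker comments (essentially /* FALLTHROUGH */ or /* FALLTHRU */, plus a few lint-style forms), while the default level 3 also accepts looser spellings such as /* Fall through. */ and /* FALL THRU */. That is why every hunk below rewrites only the comment, never the code. Here is a minimal standalone sketch, not from the patch; the file name, classify(), and its labels are invented for illustration:

/*
 * fallthrough_demo.c: standalone sketch, not part of this patch.
 * Compile with: gcc -Wall -Wimplicit-fallthrough=4 -c fallthrough_demo.c
 * Expect exactly one warning, on the "Fall through." arm below.
 */
#include <stdio.h>

static const char *
classify(int n)
{
	const char *label = "other";

	switch (n)
	{
		case 0:
			printf("zero is even\n");
			/* FALLTHROUGH */
		case 2:
			label = "even";
			break;
		case 1:
			printf("one is odd\n");
			/* Fall through. */
		case 3:
			label = "odd";
			break;
	}
	return label;
}

int
main(void)
{
	printf("%s\n", classify(0));	/* falls into case 2, prints "even" */
	return 0;
}

Built with -Wimplicit-fallthrough=4, the /* FALLTHROUGH */ arm is silent while the /* Fall through. */ arm draws "this statement may fall through"; at the default level 3, both arms are silent. Since this commit puts the flag in CFLAGS for the whole tree, every fall-through in the hunks below must carry the strict spelling.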
Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/nodeHash.c      14
-rw-r--r--  src/backend/executor/nodeHashjoin.c  10
-rw-r--r--  src/backend/executor/nodeLimit.c      4
3 files changed, 14 insertions, 14 deletions
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 5da13ada726..4516c6346bd 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -256,7 +256,7 @@ MultiExecParallelHash(HashState *node)
* way, wait for everyone to arrive here so we can proceed.
*/
BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATING);
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_BUILD_HASHING_INNER:
@@ -1181,13 +1181,13 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
/* All other participants just flush their tuples to disk. */
ExecParallelHashCloseBatchAccessors(hashtable);
}
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_GROW_BATCHES_ALLOCATING:
/* Wait for the above to be finished. */
BarrierArriveAndWait(&pstate->grow_batches_barrier,
WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING);
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_GROW_BATCHES_REPARTITIONING:
/* Make sure that we have the current dimensions and buckets. */
@@ -1200,7 +1200,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
/* Wait for the above to be finished. */
BarrierArriveAndWait(&pstate->grow_batches_barrier,
WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING);
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_GROW_BATCHES_DECIDING:
@@ -1255,7 +1255,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
dsa_free(hashtable->area, pstate->old_batches);
pstate->old_batches = InvalidDsaPointer;
}
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_GROW_BATCHES_FINISHING:
/* Wait for the above to complete. */
@@ -1533,13 +1533,13 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
/* Clear the flag. */
pstate->growth = PHJ_GROWTH_OK;
}
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_GROW_BUCKETS_ALLOCATING:
/* Wait for the above to complete. */
BarrierArriveAndWait(&pstate->grow_buckets_barrier,
WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING);
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_GROW_BUCKETS_REINSERTING:
/* Reinsert all tuples into the hash table. */
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index cc8edacdd01..6159a6957f4 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -340,7 +340,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
else
node->hj_JoinState = HJ_NEED_NEW_OUTER;
- /* FALL THRU */
+ /* FALLTHROUGH */
case HJ_NEED_NEW_OUTER:
@@ -413,7 +413,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
/* OK, let's scan the bucket for matches */
node->hj_JoinState = HJ_SCAN_BUCKET;
- /* FALL THRU */
+ /* FALLTHROUGH */
case HJ_SCAN_BUCKET:
@@ -1137,13 +1137,13 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
if (BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_ELECTING))
ExecParallelHashTableAlloc(hashtable, batchno);
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_BATCH_ALLOCATING:
/* Wait for allocation to complete. */
BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_ALLOCATING);
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_BATCH_LOADING:
/* Start (or join in) loading tuples. */
@@ -1163,7 +1163,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
sts_end_parallel_scan(inner_tuples);
BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_LOADING);
- /* Fall through. */
+ /* FALLTHROUGH */
case PHJ_BATCH_PROBING:
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index d85cf7d93e8..371b15c14a7 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -69,7 +69,7 @@ ExecLimit(PlanState *pstate)
*/
recompute_limits(node);
- /* FALL THRU */
+ /* FALLTHROUGH */
case LIMIT_RESCAN:
@@ -216,7 +216,7 @@ ExecLimit(PlanState *pstate)
}
Assert(node->lstate == LIMIT_WINDOWEND_TIES);
- /* FALL THRU */
+ /* FALLTHROUGH */
case LIMIT_WINDOWEND_TIES:
if (ScanDirectionIsForward(direction))