author     Tom Lane <tgl@sss.pgh.pa.us>  2020-08-07 14:30:41 -0400
committer  Tom Lane <tgl@sss.pgh.pa.us>  2020-08-07 14:30:47 -0400
commit     6f0b632f083ba08fabb6c496caf733802cee9d2e
tree       d726cf61fb7e44cf7c3a857be0dd07428043714b /src/test/modules/delay_execution/specs
parent     3df92bbd1dba98f72e3f005406463b0718193a0f
Support testing of cases where table schemas change after planning.
We have various cases where we allow DDL on tables to be performed with less than full AccessExclusiveLock. This requires concurrent queries to be able to cope with the DDL change mid-flight, but up to now we had no repeatable way to test such cases. To improve that, invent a test module that allows halting a backend after planning and then resuming execution once we've done desired actions in another session. (The same approach could be used to inject delays in other places, if there's a suitable hook available.)

This commit includes a single test case, which is meant to exercise the previously-untestable ExecCreatePartitionPruneState code repaired by commit 7a980dfc6. We'd probably not bother with this if that were the only foreseen benefit, but I expect additional test cases will use this infrastructure in the future.

Test module by Andy Fan, partition-addition test case by me.

Discussion: https://postgr.es/m/20200802181131.GA27754@telsasoft.com
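
The C module that implements the delay is not part of this diff (the diffstat below is limited to the specs directory), but the spec's use of the delay_execution.post_planning_lock_id setting together with pg_advisory_lock() suggests its general shape. The following is a minimal sketch, not the committed delay_execution.c; the function name and most details are illustrative. It assumes a planner_hook wrapper that, after planning, takes and releases the configured advisory lock, so the backend blocks until another session releases that lock.

    /*
     * Sketch only (illustrative, not the committed source): pause a backend
     * between planning and execution by cycling an advisory lock.
     */
    #include "postgres.h"

    #include <limits.h>

    #include "fmgr.h"
    #include "miscadmin.h"
    #include "optimizer/planner.h"
    #include "utils/fmgrprotos.h"
    #include "utils/guc.h"

    PG_MODULE_MAGIC;

    void        _PG_init(void);

    /* GUC: advisory lock ID to cycle after planning; zero disables the delay */
    static int  post_planning_lock_id = 0;

    /* Previously installed planner hook, if any */
    static planner_hook_type prev_planner_hook = NULL;

    static PlannedStmt *
    delay_execution_planner(Query *parse, const char *query_string,
                            int cursorOptions, ParamListInfo boundParams)
    {
        PlannedStmt *result;

        /* Plan the query as usual, chaining to any earlier hook */
        if (prev_planner_hook)
            result = prev_planner_hook(parse, query_string,
                                       cursorOptions, boundParams);
        else
            result = standard_planner(parse, query_string,
                                      cursorOptions, boundParams);

        /*
         * If enabled, block until another session releases the advisory lock,
         * then release our own hold on it so the test can be repeated.
         */
        if (post_planning_lock_id != 0)
        {
            DirectFunctionCall1(pg_advisory_lock_int8,
                                Int64GetDatum((int64) post_planning_lock_id));
            DirectFunctionCall1(pg_advisory_unlock_int8,
                                Int64GetDatum((int64) post_planning_lock_id));
            CHECK_FOR_INTERRUPTS();
        }

        return result;
    }

    void
    _PG_init(void)
    {
        DefineCustomIntVariable("delay_execution.post_planning_lock_id",
                                "Advisory lock ID to lock/unlock after planning.",
                                "Zero disables the delay.",
                                &post_planning_lock_id,
                                0, 0, INT_MAX,
                                PGC_USERSET, 0,
                                NULL, NULL, NULL);

        prev_planner_hook = planner_hook;
        planner_hook = delay_execution_planner;
    }

Because the pause happens after planning but before execution, another session can commit DDL in that window, which is exactly what the spec below exploits.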
Diffstat (limited to 'src/test/modules/delay_execution/specs')
-rw-r--r--  src/test/modules/delay_execution/specs/partition-addition.spec | 38
1 file changed, 38 insertions, 0 deletions
diff --git a/src/test/modules/delay_execution/specs/partition-addition.spec b/src/test/modules/delay_execution/specs/partition-addition.spec
new file mode 100644
index 00000000000..2a0948247e3
--- /dev/null
+++ b/src/test/modules/delay_execution/specs/partition-addition.spec
@@ -0,0 +1,38 @@
+# Test addition of a partition with less-than-exclusive locking.
+
+setup
+{
+ CREATE TABLE foo (a int, b text) PARTITION BY LIST(a);
+ CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1);
+ CREATE TABLE foo3 PARTITION OF foo FOR VALUES IN (3);
+ CREATE TABLE foo4 PARTITION OF foo FOR VALUES IN (4);
+ INSERT INTO foo VALUES (1, 'ABC');
+ INSERT INTO foo VALUES (3, 'DEF');
+ INSERT INTO foo VALUES (4, 'GHI');
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+# The SELECT will be planned with just the three partitions shown above,
+# of which we expect foo1 to be pruned at planning and foo3 at execution.
+# Then we'll block, and by the time the query is actually executed,
+# partition foo2 will also exist. We expect that not to be scanned.
+# This test is specifically designed to check ExecCreatePartitionPruneState's
+# code for matching up the partition lists in such cases.
+
+session "s1"
+step "s1exec" { LOAD 'delay_execution';
+ SET delay_execution.post_planning_lock_id = 12345;
+ SELECT * FROM foo WHERE a <> 1 AND a <> (SELECT 3); }
+
+session "s2"
+step "s2lock" { SELECT pg_advisory_lock(12345); }
+step "s2unlock" { SELECT pg_advisory_unlock(12345); }
+step "s2addp" { CREATE TABLE foo2 (LIKE foo);
+ ALTER TABLE foo ATTACH PARTITION foo2 FOR VALUES IN (2);
+ INSERT INTO foo VALUES (2, 'ADD2'); }
+
+permutation "s2lock" "s1exec" "s2addp" "s2unlock"
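
In this permutation, s2 takes advisory lock 12345 before s1 plans its query, so s1 blocks in the post-planning delay; s2 then attaches foo2 and inserts a row into it before releasing the lock. When s1's executor resumes with the original three-partition plan, foo1 has already been pruned at plan time, foo3 should be pruned once the subselect's value is known at execution, and foo2 is never scanned because it is not in the plan, so the only row expected back is foo4's (4, 'GHI'). The spec is presumably driven like other isolation tests under src/test/modules, via pg_isolation_regress from the module's make check target.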