Diffstat (limited to 'src/bin')
-rw-r--r--  src/bin/initdb/t/001_initdb.pl                 10
-rw-r--r--  src/bin/pg_basebackup/t/010_pg_basebackup.pl    2
-rw-r--r--  src/bin/pg_checksums/t/002_actions.pl         166
-rw-r--r--  src/bin/pg_ctl/t/001_start_stop.pl              1
-rw-r--r--  src/bin/pg_ctl/t/004_logrotate.pl              21
-rw-r--r--  src/bin/pg_dump/t/001_basic.pl                 11
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl               41
-rw-r--r--  src/bin/pg_rewind/t/002_databases.pl            6
-rw-r--r--  src/bin/pg_rewind/t/RewindTest.pm              16
-rw-r--r--  src/bin/pgbench/t/001_pgbench_with_server.pl  144
-rw-r--r--  src/bin/scripts/t/090_reindexdb.pl              3
-rw-r--r--  src/bin/scripts/t/100_vacuumdb.pl              12
12 files changed, 241 insertions, 192 deletions
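
Every hunk below is a pure layout change to the Perl TAP tests (argument lists re-wrapped, brackets re-spaced, assignments vertically aligned), which is the kind of output a perltidy pass produces. As a minimal sketch, assuming perltidy is installed, the command is run from the top of the source tree, and the profile lives at src/tools/pgindent/perltidyrc (the path PostgreSQL uses for its Perl formatting settings), such a pass could be driven like this:

use strict;
use warnings;
use File::Find;

# Reformat every Perl script and module under src/bin in place.
find(
	{
		no_chdir => 1,
		wanted   => sub {
			return unless /\.(?:pl|pm)$/;
			# -b rewrites each file in place, keeping a .bak backup.
			system('perltidy', '--profile=src/tools/pgindent/perltidyrc',
				'-b', $_) == 0
			  or warn "perltidy failed on $_\n";
		}
	},
	'src/bin');
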
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index 8dfcd8752a1..8387b945d36 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -60,14 +60,14 @@ mkdir $datadir;
}
# Control file should tell that data checksums are disabled by default.
-command_like(['pg_controldata', $datadir],
- qr/Data page checksum version:.*0/,
- 'checksums are disabled in control file');
+command_like(
+ [ 'pg_controldata', $datadir ],
+ qr/Data page checksum version:.*0/,
+ 'checksums are disabled in control file');
# pg_checksums fails with checksums disabled by default. This is
# not part of the tests included in pg_checksums to save from
# the creation of an extra instance.
-command_fails(
- [ 'pg_checksums', '-D', $datadir],
+command_fails([ 'pg_checksums', '-D', $datadir ],
"pg_checksums fails with data checksum disabled");
command_ok([ 'initdb', '-S', $datadir ], 'sync only');
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 33869fecc97..7d59d3dffa6 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -359,7 +359,7 @@ SKIP:
$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backupR", '-R' ],
'pg_basebackup -R runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
-ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
+ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
rmtree("$tempdir/backupR");
diff --git a/src/bin/pg_checksums/t/002_actions.pl b/src/bin/pg_checksums/t/002_actions.pl
index 41575c52459..a8f45a268a6 100644
--- a/src/bin/pg_checksums/t/002_actions.pl
+++ b/src/bin/pg_checksums/t/002_actions.pl
@@ -14,21 +14,22 @@ use Test::More tests => 62;
# at the end.
sub check_relation_corruption
{
- my $node = shift;
- my $table = shift;
+ my $node       = shift;
+ my $table      = shift;
my $tablespace = shift;
- my $pgdata = $node->data_dir;
+ my $pgdata     = $node->data_dir;
- $node->safe_psql('postgres',
+ $node->safe_psql(
+ 'postgres',
"SELECT a INTO $table FROM generate_series(1,10000) AS a;
ALTER TABLE $table SET (autovacuum_enabled=false);");
$node->safe_psql('postgres',
- "ALTER TABLE ".$table." SET TABLESPACE ".$tablespace.";");
+ "ALTER TABLE " . $table . " SET TABLESPACE " . $tablespace . ";");
- my $file_corrupted = $node->safe_psql('postgres',
- "SELECT pg_relation_filepath('$table');");
- my $relfilenode_corrupted = $node->safe_psql('postgres',
+ my $file_corrupted =
+ $node->safe_psql('postgres', "SELECT pg_relation_filepath('$table');");
+ my $relfilenode_corrupted = $node->safe_psql('postgres',
"SELECT relfilenode FROM pg_class WHERE relname = '$table';");
# Set page header and block size
@@ -38,9 +39,14 @@ sub check_relation_corruption
# Checksums are correct for single relfilenode as the table is not
# corrupted yet.
- command_ok(['pg_checksums', '--check', '-D', $pgdata, '-r',
- $relfilenode_corrupted],
- "succeeds for single relfilenode on tablespace $tablespace with offline cluster");
+ command_ok(
+ [
+ 'pg_checksums', '--check',
+ '-D', $pgdata,
+ '-r', $relfilenode_corrupted
+ ],
+ "succeeds for single relfilenode on tablespace $tablespace with offline cluster"
+ );
# Time to create some corruption
open my $file, '+<', "$pgdata/$file_corrupted";
@@ -49,26 +55,32 @@ sub check_relation_corruption
close $file;
# Checksum checks on single relfilenode fail
- $node->command_checks_all([ 'pg_checksums', '--check', '-D', $pgdata,
- '-r', $relfilenode_corrupted],
- 1,
- [qr/Bad checksums:.*1/],
- [qr/checksum verification failed/],
- "fails with corrupted data for single relfilenode on tablespace $tablespace");
+ $node->command_checks_all(
+ [
+ 'pg_checksums', '--check',
+ '-D', $pgdata,
+ '-r', $relfilenode_corrupted
+ ],
+ 1,
+ [qr/Bad checksums:.*1/],
+ [qr/checksum verification failed/],
+ "fails with corrupted data for single relfilenode on tablespace $tablespace"
+ );
# Global checksum checks fail as well
- $node->command_checks_all([ 'pg_checksums', '--check', '-D', $pgdata],
- 1,
- [qr/Bad checksums:.*1/],
- [qr/checksum verification failed/],
- "fails with corrupted data on tablespace $tablespace");
+ $node->command_checks_all(
+ [ 'pg_checksums', '--check', '-D', $pgdata ],
+ 1,
+ [qr/Bad checksums:.*1/],
+ [qr/checksum verification failed/],
+ "fails with corrupted data on tablespace $tablespace");
# Drop corrupted table again and make sure there is no more corruption.
$node->start;
$node->safe_psql('postgres', "DROP TABLE $table;");
$node->stop;
- $node->command_ok(['pg_checksums', '--check', '-D', $pgdata],
- "succeeds again after table drop on tablespace $tablespace");
+ $node->command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
+ "succeeds again after table drop on tablespace $tablespace");
$node->start;
return;
@@ -80,19 +92,20 @@ $node->init();
my $pgdata = $node->data_dir;
# Control file should know that checksums are disabled.
-command_like(['pg_controldata', $pgdata],
- qr/Data page checksum version:.*0/,
- 'checksums disabled in control file');
+command_like(
+ [ 'pg_controldata', $pgdata ],
+ qr/Data page checksum version:.*0/,
+ 'checksums disabled in control file');
# These are correct but empty files, so they should pass through.
-append_to_file "$pgdata/global/99999", "";
-append_to_file "$pgdata/global/99999.123", "";
-append_to_file "$pgdata/global/99999_fsm", "";
-append_to_file "$pgdata/global/99999_init", "";
-append_to_file "$pgdata/global/99999_vm", "";
+append_to_file "$pgdata/global/99999",          "";
+append_to_file "$pgdata/global/99999.123",      "";
+append_to_file "$pgdata/global/99999_fsm",      "";
+append_to_file "$pgdata/global/99999_init",     "";
+append_to_file "$pgdata/global/99999_vm",       "";
append_to_file "$pgdata/global/99999_init.123", "";
-append_to_file "$pgdata/global/99999_fsm.123", "";
-append_to_file "$pgdata/global/99999_vm.123", "";
+append_to_file "$pgdata/global/99999_fsm.123",  "";
+append_to_file "$pgdata/global/99999_vm.123",   "";
# These are temporary files and folders with dummy contents, which
# should be ignored by the scan.
@@ -101,67 +114,75 @@ mkdir "$pgdata/global/pgsql_tmp";
append_to_file "$pgdata/global/pgsql_tmp/1.1", "foo";
# Enable checksums.
-command_ok(['pg_checksums', '--enable', '--no-sync', '-D', $pgdata],
- "checksums successfully enabled in cluster");
+command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+ "checksums successfully enabled in cluster");
# Successive attempt to enable checksums fails.
-command_fails(['pg_checksums', '--enable', '--no-sync', '-D', $pgdata],
- "enabling checksums fails if already enabled");
+command_fails([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+ "enabling checksums fails if already enabled");
# Control file should know that checksums are enabled.
-command_like(['pg_controldata', $pgdata],
- qr/Data page checksum version:.*1/,
- 'checksums enabled in control file');
+command_like(
+ [ 'pg_controldata', $pgdata ],
+ qr/Data page checksum version:.*1/,
+ 'checksums enabled in control file');
# Disable checksums again. Flush result here as that should be cheap.
-command_ok(['pg_checksums', '--disable', '-D', $pgdata],
- "checksums successfully disabled in cluster");
+command_ok(
+ [ 'pg_checksums', '--disable', '-D', $pgdata ],
+ "checksums successfully disabled in cluster");
# Successive attempt to disable checksums fails.
-command_fails(['pg_checksums', '--disable', '--no-sync', '-D', $pgdata],
- "disabling checksums fails if already disabled");
+command_fails(
+ [ 'pg_checksums', '--disable', '--no-sync', '-D', $pgdata ],
+ "disabling checksums fails if already disabled");
# Control file should know that checksums are disabled.
-command_like(['pg_controldata', $pgdata],
- qr/Data page checksum version:.*0/,
- 'checksums disabled in control file');
+command_like(
+ [ 'pg_controldata', $pgdata ],
+ qr/Data page checksum version:.*0/,
+ 'checksums disabled in control file');
# Enable checksums again for follow-up tests.
-command_ok(['pg_checksums', '--enable', '--no-sync', '-D', $pgdata],
- "checksums successfully enabled in cluster");
+command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+ "checksums successfully enabled in cluster");
# Control file should know that checksums are enabled.
-command_like(['pg_controldata', $pgdata],
- qr/Data page checksum version:.*1/,
- 'checksums enabled in control file');
+command_like(
+ [ 'pg_controldata', $pgdata ],
+ qr/Data page checksum version:.*1/,
+ 'checksums enabled in control file');
# Checksums pass on a newly-created cluster
-command_ok(['pg_checksums', '--check', '-D', $pgdata],
- "succeeds with offline cluster");
+command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
+ "succeeds with offline cluster");
# Checksums are verified if no other arguments are specified
-command_ok(['pg_checksums', '-D', $pgdata],
- "verifies checksums as default action");
+command_ok(
+ [ 'pg_checksums', '-D', $pgdata ],
+ "verifies checksums as default action");
# Specific relation files cannot be requested when action is --disable
# or --enable.
-command_fails(['pg_checksums', '--disable', '-r', '1234', '-D', $pgdata],
- "fails when relfilenodes are requested and action is --disable");
-command_fails(['pg_checksums', '--enable', '-r', '1234', '-D', $pgdata],
- "fails when relfilenodes are requested and action is --enable");
+command_fails(
+ [ 'pg_checksums', '--disable', '-r', '1234', '-D', $pgdata ],
+ "fails when relfilenodes are requested and action is --disable");
+command_fails(
+ [ 'pg_checksums', '--enable', '-r', '1234', '-D', $pgdata ],
+ "fails when relfilenodes are requested and action is --enable");
# Checks cannot happen with an online cluster
$node->start;
-command_fails(['pg_checksums', '--check', '-D', $pgdata],
- "fails with online cluster");
+command_fails([ 'pg_checksums', '--check', '-D', $pgdata ],
+ "fails with online cluster");
# Check corruption of table on default tablespace.
check_relation_corruption($node, 'corrupt1', 'pg_default');
# Create tablespace to check corruptions in a non-default tablespace.
-my $basedir = $node->basedir;
+my $basedir        = $node->basedir;
my $tablespace_dir = "$basedir/ts_corrupt_dir";
-mkdir ($tablespace_dir);
+mkdir($tablespace_dir);
$tablespace_dir = TestLib::real_dir($tablespace_dir);
$node->safe_psql('postgres',
"CREATE TABLESPACE ts_corrupt LOCATION '$tablespace_dir';");
@@ -171,19 +192,20 @@ check_relation_corruption($node, 'corrupt2', 'ts_corrupt');
# correctly-named relation files filled with some corrupted data.
sub fail_corrupt
{
- my $node = shift;
- my $file = shift;
+ my $node   = shift;
+ my $file   = shift;
my $pgdata = $node->data_dir;
# Create the file with some dummy data in it.
my $file_name = "$pgdata/global/$file";
append_to_file $file_name, "foo";
- $node->command_checks_all([ 'pg_checksums', '--check', '-D', $pgdata],
- 1,
- [qr/^$/],
- [qr/could not read block 0 in file.*$file\":/],
- "fails for corrupted data in $file");
+ $node->command_checks_all(
+ [ 'pg_checksums', '--check', '-D', $pgdata ],
+ 1,
+ [qr/^$/],
+ [qr/could not read block 0 in file.*$file\":/],
+ "fails for corrupted data in $file");
# Remove file to prevent future lookup errors on conflicts.
unlink $file_name;
diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl
index a1143e34519..e5d46a6f257 100644
--- a/src/bin/pg_ctl/t/001_start_stop.pl
+++ b/src/bin/pg_ctl/t/001_start_stop.pl
@@ -26,6 +26,7 @@ open my $conf, '>>', "$tempdir/data/postgresql.conf";
print $conf "fsync = off\n";
print $conf TestLib::slurp_file($ENV{TEMP_CONFIG})
if defined $ENV{TEMP_CONFIG};
+
if (!$windows_os)
{
print $conf "listen_addresses = ''\n";
diff --git a/src/bin/pg_ctl/t/004_logrotate.pl b/src/bin/pg_ctl/t/004_logrotate.pl
index e8b60b769f6..71dbfd20301 100644
--- a/src/bin/pg_ctl/t/004_logrotate.pl
+++ b/src/bin/pg_ctl/t/004_logrotate.pl
@@ -25,8 +25,10 @@ my $current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
note "current_logfiles = $current_logfiles";
-like($current_logfiles, qr|^stderr log/postgresql-.*log$|,
- 'current_logfiles is sane');
+like(
+ $current_logfiles,
+ qr|^stderr log/postgresql-.*log$|,
+ 'current_logfiles is sane');
my $lfname = $current_logfiles;
$lfname =~ s/^stderr //;
@@ -43,8 +45,7 @@ for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
usleep(100_000);
}
-like($first_logfile, qr/division by zero/,
- 'found expected log file content');
+like($first_logfile, qr/division by zero/, 'found expected log file content');
# Sleep 2 seconds and ask for log rotation; this should result in
# output into a different log file name.
@@ -63,8 +64,10 @@ for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
note "now current_logfiles = $new_current_logfiles";
-like($new_current_logfiles, qr|^stderr log/postgresql-.*log$|,
- 'new current_logfiles is sane');
+like(
+ $new_current_logfiles,
+ qr|^stderr log/postgresql-.*log$|,
+ 'new current_logfiles is sane');
$lfname = $new_current_logfiles;
$lfname =~ s/^stderr //;
@@ -82,7 +85,9 @@ for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
usleep(100_000);
}
-like($second_logfile, qr/syntax error/,
- 'found expected log file content in new log file');
+like(
+ $second_logfile,
+ qr/syntax error/,
+ 'found expected log file content in new log file');
$node->stop();
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 5e3f0e58398..049d5e43f2f 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -50,10 +50,9 @@ command_fails_like(
);
command_fails_like(
- [ 'pg_restore' ],
+ ['pg_restore'],
qr{\Qpg_restore: error: one of -d/--dbname and -f/--file must be specified\E},
- 'pg_restore: error: one of -d/--dbname and -f/--file must be specified'
-);
+ 'pg_restore: error: one of -d/--dbname and -f/--file must be specified');
command_fails_like(
[ 'pg_restore', '-s', '-a', '-f -' ],
@@ -125,7 +124,8 @@ command_fails_like(
command_fails_like(
[ 'pg_dump', '--on-conflict-do-nothing' ],
qr/pg_dump: error: option --on-conflict-do-nothing requires option --inserts, --rows-per-insert or --column-inserts/,
- 'pg_dump: --on-conflict-do-nothing requires --inserts, --rows-per-insert, --column-inserts');
+ 'pg_dump: --on-conflict-do-nothing requires --inserts, --rows-per-insert, --column-inserts'
+);
# pg_dumpall command-line argument checks
command_fails_like(
@@ -161,4 +161,5 @@ command_fails_like(
command_fails_like(
[ 'pg_dumpall', '--exclude-database=foo', '--globals-only' ],
qr/\Qpg_dumpall: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
- 'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only');
+ 'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only'
+);
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 5721882b3b2..cb9181e1569 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -810,7 +810,8 @@ my %tests = (
},
'ALTER TABLE test_second_table OWNER TO' => {
- regexp => qr/^\QALTER TABLE dump_test.test_second_table OWNER TO \E.+;/m,
+ regexp =>
+ qr/^\QALTER TABLE dump_test.test_second_table OWNER TO \E.+;/m,
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
@@ -2427,7 +2428,7 @@ my %tests = (
\QALTER INDEX dump_test.index_with_stats ALTER COLUMN 3 SET STATISTICS 500;\E\n
/xms,
like =>
- { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
+ { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
},
@@ -2900,12 +2901,12 @@ my %tests = (
data_only => 1,
section_pre_data => 1,
test_schema_plus_blobs => 1,
- binary_upgrade => 1,
+ binary_upgrade => 1,
},
unlike => {
- no_blobs => 1,
- no_privs => 1,
- schema_only => 1,
+ no_blobs => 1,
+ no_privs => 1,
+ schema_only => 1,
},
},
@@ -3116,13 +3117,13 @@ my %tests = (
'CREATE ACCESS METHOD regress_test_table_am' => {
create_order => 11,
- create_sql => 'CREATE ACCESS METHOD regress_table_am TYPE TABLE HANDLER heap_tableam_handler;',
+ create_sql =>
+ 'CREATE ACCESS METHOD regress_table_am TYPE TABLE HANDLER heap_tableam_handler;',
regexp => qr/^
\QCREATE ACCESS METHOD regress_table_am TYPE TABLE HANDLER heap_tableam_handler;\E
\n/xm,
like => {
- %full_runs,
- section_pre_data => 1,
+ %full_runs, section_pre_data => 1,
},
},
@@ -3134,7 +3135,7 @@ my %tests = (
# pretty, but seems hard to do better in this framework.
'CREATE TABLE regress_pg_dump_table_am' => {
create_order => 12,
- create_sql => '
+ create_sql => '
CREATE TABLE dump_test.regress_pg_dump_table_am_0() USING heap;
CREATE TABLE dump_test.regress_pg_dump_table_am_1 (col1 int) USING regress_table_am;
CREATE TABLE dump_test.regress_pg_dump_table_am_2() USING heap;',
@@ -3145,16 +3146,14 @@ my %tests = (
\n\s+\Qcol1 integer\E
\n\);/xm,
like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1,
+ %full_runs, %dump_test_schema_runs, section_pre_data => 1,
},
- unlike => { exclude_dump_test_schema => 1},
+ unlike => { exclude_dump_test_schema => 1 },
},
'CREATE MATERIALIZED VIEW regress_pg_dump_matview_am' => {
create_order => 13,
- create_sql => '
+ create_sql => '
CREATE MATERIALIZED VIEW dump_test.regress_pg_dump_matview_am_0 USING heap AS SELECT 1;
CREATE MATERIALIZED VIEW dump_test.regress_pg_dump_matview_am_1
USING regress_table_am AS SELECT count(*) FROM pg_class;
@@ -3167,13 +3166,10 @@ my %tests = (
\n\s+\QFROM pg_class\E
\n\s+\QWITH NO DATA;\E\n/xm,
like => {
- %full_runs,
- %dump_test_schema_runs,
- section_pre_data => 1,
+ %full_runs, %dump_test_schema_runs, section_pre_data => 1,
},
- unlike => { exclude_dump_test_schema => 1},
- }
-);
+ unlike => { exclude_dump_test_schema => 1 },
+ });
#########################################
# Create a PG instance to test actually dumping from
@@ -3330,8 +3326,7 @@ foreach my $db (sort keys %create_sql)
command_fails_like(
[ 'pg_dump', '-p', "$port", 'qqq' ],
qr/\Qpg_dump: error: connection to database "qqq" failed: FATAL: database "qqq" does not exist\E/,
- 'connecting to a non-existent database'
-);
+ 'connecting to a non-existent database');
#########################################
# Test connecting with an unprivileged user
diff --git a/src/bin/pg_rewind/t/002_databases.pl b/src/bin/pg_rewind/t/002_databases.pl
index 0562c21549b..f1eb4fe1d2b 100644
--- a/src/bin/pg_rewind/t/002_databases.pl
+++ b/src/bin/pg_rewind/t/002_databases.pl
@@ -25,17 +25,17 @@ sub run_test
# replicated to the standby.
master_psql('CREATE DATABASE beforepromotion');
master_psql('CREATE TABLE beforepromotion_tab (a int)',
- 'beforepromotion');
+ 'beforepromotion');
RewindTest::promote_standby();
# Create databases in the old master and the new promoted standby.
master_psql('CREATE DATABASE master_afterpromotion');
master_psql('CREATE TABLE master_promotion_tab (a int)',
- 'master_afterpromotion');
+ 'master_afterpromotion');
standby_psql('CREATE DATABASE standby_afterpromotion');
standby_psql('CREATE TABLE standby_promotion_tab (a int)',
- 'standby_afterpromotion');
+ 'standby_afterpromotion');
# The clusters are now diverged.
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index f477ffab1da..61904f25ff4 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -133,8 +133,10 @@ sub setup_cluster
# Set up pg_hba.conf and pg_ident.conf for the role running
# pg_rewind. This role is used for all the tests, and has
# minimal permissions enough to rewind from an online source.
- $node_master->init(allows_streaming => 1, extra => $extra,
- auth_extra => ['--create-role', 'rewind_user']);
+ $node_master->init(
+ allows_streaming => 1,
+ extra => $extra,
+ auth_extra => [ '--create-role', 'rewind_user' ]);
# Set wal_keep_segments to prevent WAL segment recycling after enforced
# checkpoints in the tests.
@@ -151,7 +153,8 @@ sub start_master
# Create custom role which is used to run pg_rewind, and adjust its
# permissions to the minimum necessary.
- $node_master->psql('postgres', "
+ $node_master->psql(
+ 'postgres', "
CREATE ROLE rewind_user LOGIN;
GRANT EXECUTE ON function pg_catalog.pg_ls_dir(text, boolean, boolean)
TO rewind_user;
@@ -265,10 +268,9 @@ sub run_pg_rewind
# Do rewind using a remote connection as source
command_ok(
[
- 'pg_rewind', "--debug",
- "--source-server", $standby_connstr,
- "--target-pgdata=$master_pgdata",
- "--no-sync"
+ 'pg_rewind', "--debug",
+ "--source-server", $standby_connstr,
+ "--target-pgdata=$master_pgdata", "--no-sync"
],
'pg_rewind remote');
}
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index b75a018db4b..dc2c72fa927 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -63,7 +63,7 @@ sub pgbench
# makes a 5-MiB table.
$node->safe_psql('postgres',
- 'CREATE UNLOGGED TABLE insert_tbl (id serial primary key); ');
+ 'CREATE UNLOGGED TABLE insert_tbl (id serial primary key); ');
pgbench(
'--no-vacuum --client=5 --protocol=prepared --transactions=25',
@@ -286,7 +286,7 @@ pgbench(
qr{command=15.: double 15\b},
qr{command=16.: double 16\b},
qr{command=17.: double 17\b},
- qr{command=20.: int 1\b}, # zipfian random
+ qr{command=20.: int 1\b}, # zipfian random
qr{command=21.: double -27\b},
qr{command=22.: double 1024\b},
qr{command=23.: double 1\b},
@@ -326,9 +326,9 @@ pgbench(
qr{command=86.: int 86\b},
qr{command=93.: int 93\b},
qr{command=95.: int 0\b},
- qr{command=96.: int 1\b}, # :scale
- qr{command=97.: int 0\b}, # :client_id
- qr{command=98.: int 5432\b}, # :random_seed
+ qr{command=96.: int 1\b}, # :scale
+ qr{command=97.: int 0\b}, # :client_id
+ qr{command=98.: int 5432\b}, # :random_seed
qr{command=99.: int -9223372036854775808\b}, # min int
qr{command=100.: int 9223372036854775807\b}, # max int
],
@@ -542,14 +542,17 @@ pgbench(
pgbench(
'-t 1', 0,
[ qr{type: .*/001_pgbench_gset}, qr{processed: 1/1} ],
- [ qr{command=3.: int 0\b},
+ [
+ qr{command=3.: int 0\b},
qr{command=5.: int 1\b},
qr{command=6.: int 2\b},
qr{command=8.: int 3\b},
qr{command=10.: int 4\b},
- qr{command=12.: int 5\b} ],
+ qr{command=12.: int 5\b}
+ ],
'pgbench gset command',
- { '001_pgbench_gset' => q{-- test gset
+ {
+ '001_pgbench_gset' => q{-- test gset
-- no columns
SELECT \gset
-- one value
@@ -568,7 +571,8 @@ SELECT 0 AS i4, 4 AS i4 \gset
-- work on the last SQL command under \;
\; \; SELECT 0 AS i5 \; SELECT 5 AS i5 \; \; \gset
\set i debug(:i5)
-} });
+}
+ });
# trigger many expression errors
my @errors = (
@@ -587,10 +591,11 @@ my @errors = (
}
],
[
- 'sql too many args', 1, [qr{statement has too many arguments.*\b255\b}],
+ 'sql too many args', 1,
+ [qr{statement has too many arguments.*\b255\b}],
q{-- MAX_ARGS=256 for prepared
\set i 0
-SELECT LEAST(}.join(', ', (':i') x 256).q{)}
+SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
],
# SHELL
@@ -609,7 +614,7 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
[
'shell too many args', 1, [qr{too many arguments in command "shell"}],
q{-- 256 arguments to \shell
-\shell echo }.join(' ', ('arg') x 255)
+\shell echo } . join(' ', ('arg') x 255)
],
# SET
@@ -625,11 +630,9 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
'set invalid variable name', 2,
[qr{invalid variable name}], q{\set . 1}
],
+ [ 'set division by zero', 2, [qr{division by zero}], q{\set i 1/0} ],
[
- 'set division by zero', 2,
- [qr{division by zero}], q{\set i 1/0}
- ],
- [ 'set undefined variable',
+ 'set undefined variable',
2,
[qr{undefined variable "nosuchvariable"}],
q{\set i :nosuchvariable}
@@ -646,10 +649,8 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
[qr{empty range given to random}], q{\set i random(5,3)}
],
[
- 'set random range too large',
- 2,
- [qr{random range is too large}],
- q{\set i random(:minint, :maxint)}
+ 'set random range too large', 2,
+ [qr{random range is too large}], q{\set i random(:minint, :maxint)}
],
[
'set gaussian param too small',
@@ -713,16 +714,26 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
],
# SET: ARITHMETIC OVERFLOW DETECTION
- [ 'set double to int overflow', 2,
- [ qr{double to int overflow for 100} ], q{\set i int(1E32)} ],
- [ 'set bigint add overflow', 2,
- [ qr{int add out} ], q{\set i (1<<62) + (1<<62)} ],
- [ 'set bigint sub overflow', 2,
- [ qr{int sub out} ], q{\set i 0 - (1<<62) - (1<<62) - (1<<62)} ],
- [ 'set bigint mul overflow', 2,
- [ qr{int mul out} ], q{\set i 2 * (1<<62)} ],
- [ 'set bigint div out of range', 2,
- [ qr{bigint div out of range} ], q{\set i :minint / -1} ],
+ [
+ 'set double to int overflow', 2,
+ [qr{double to int overflow for 100}], q{\set i int(1E32)}
+ ],
+ [
+ 'set bigint add overflow', 2,
+ [qr{int add out}], q{\set i (1<<62) + (1<<62)}
+ ],
+ [
+ 'set bigint sub overflow',
+ 2, [qr{int sub out}], q{\set i 0 - (1<<62) - (1<<62) - (1<<62)}
+ ],
+ [
+ 'set bigint mul overflow', 2,
+ [qr{int mul out}], q{\set i 2 * (1<<62)}
+ ],
+ [
+ 'set bigint div out of range', 2,
+ [qr{bigint div out of range}], q{\set i :minint / -1}
+ ],
# SETSHELL
[
@@ -759,31 +770,47 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
[qr{invalid command .* "nosuchcommand"}], q{\nosuchcommand}
],
[ 'misc empty script', 1, [qr{empty command list for script}], q{} ],
- [ 'bad boolean', 2,
- [qr{malformed variable.*trueXXX}], q{\set b :badtrue or true} ],
+ [
+ 'bad boolean', 2,
+ [qr{malformed variable.*trueXXX}], q{\set b :badtrue or true}
+ ],
# GSET
- [ 'gset no row', 2,
- [qr{expected one row, got 0\b}], q{SELECT WHERE FALSE \gset} ],
+ [
+ 'gset no row', 2,
+ [qr{expected one row, got 0\b}], q{SELECT WHERE FALSE \gset}
+ ],
[ 'gset alone', 1, [qr{gset must follow a SQL command}], q{\gset} ],
- [ 'gset no SQL', 1,
+ [
+ 'gset no SQL', 1,
[qr{gset must follow a SQL command}], q{\set i +1
-\gset} ],
- [ 'gset too many arguments', 1,
- [qr{too many arguments}], q{SELECT 1 \gset a b} ],
- [ 'gset after gset', 1,
- [qr{gset must follow a SQL command}], q{SELECT 1 AS i \gset
-\gset} ],
- [ 'gset non SELECT', 2,
+\gset}
+ ],
+ [
+ 'gset too many arguments', 1,
+ [qr{too many arguments}], q{SELECT 1 \gset a b}
+ ],
+ [
+ 'gset after gset', 1,
+ [qr{gset must follow a SQL command}], q{SELECT 1 AS i \gset
+\gset}
+ ],
+ [
+ 'gset non SELECT',
+ 2,
[qr{expected one row, got 0}],
- q{DROP TABLE IF EXISTS no_such_table \gset} ],
- [ 'gset bad default name', 2,
- [qr{error storing into variable \?column\?}],
- q{SELECT 1 \gset} ],
- [ 'gset bad name', 2,
+ q{DROP TABLE IF EXISTS no_such_table \gset}
+ ],
+ [
+ 'gset bad default name', 2,
+ [qr{error storing into variable \?column\?}], q{SELECT 1 \gset}
+ ],
+ [
+ 'gset bad name',
+ 2,
[qr{error storing into variable bad name!}],
- q{SELECT 1 AS "bad name!" \gset} ],
- );
+ q{SELECT 1 AS "bad name!" \gset}
+ ],);
for my $e (@errors)
{
@@ -792,9 +819,9 @@ for my $e (@errors)
my $n = '001_pgbench_error_' . $name;
$n =~ s/ /_/g;
pgbench(
- '-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX' .
- ' -Dmaxint=9223372036854775807 -Dminint=-9223372036854775808' .
- ($no_prepare ? '' : ' -M prepared'),
+ '-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX'
+ . ' -Dmaxint=9223372036854775807 -Dminint=-9223372036854775808'
+ . ($no_prepare ? '' : ' -M prepared'),
$status,
[ $status == 1 ? qr{^$} : qr{processed: 0/1} ],
$re,
@@ -869,12 +896,9 @@ my $bdir = $node->basedir;
# with sampling rate
pgbench(
- "-n -S -t 50 -c 2 --log --sampling-rate=0.5",
- 0,
- [ qr{select only}, qr{processed: 100/100} ],
- [ qr{^$} ],
- 'pgbench logs',
- undef,
+ "-n -S -t 50 -c 2 --log --sampling-rate=0.5", 0,
+ [ qr{select only}, qr{processed: 100/100} ], [qr{^$}],
+ 'pgbench logs', undef,
"--log-prefix=$bdir/001_pgbench_log_2");
check_pgbench_logs($bdir, '001_pgbench_log_2', 1, 8, 92,
@@ -882,8 +906,8 @@ check_pgbench_logs($bdir, '001_pgbench_log_2', 1, 8, 92,
# check log file in some detail
pgbench(
- "-n -b se -t 10 -l",
- 0, [ qr{select only}, qr{processed: 10/10} ], [ qr{^$} ],
+ "-n -b se -t 10 -l", 0,
+ [ qr{select only}, qr{processed: 10/10} ], [qr{^$}],
'pgbench logs contents', undef,
"--log-prefix=$bdir/001_pgbench_log_3");
diff --git a/src/bin/scripts/t/090_reindexdb.pl b/src/bin/scripts/t/090_reindexdb.pl
index ef83be767ab..1af8ab70ad9 100644
--- a/src/bin/scripts/t/090_reindexdb.pl
+++ b/src/bin/scripts/t/090_reindexdb.pl
@@ -61,8 +61,7 @@ $node->issues_sql_like(
[ 'reindexdb', '--concurrently', '-S', 'public', 'postgres' ],
qr/statement: REINDEX SCHEMA CONCURRENTLY public;/,
'reindex specific schema concurrently');
-$node->command_fails(
- [ 'reindexdb', '--concurrently', '-s', 'postgres' ],
+$node->command_fails([ 'reindexdb', '--concurrently', '-s', 'postgres' ],
'reindex system tables concurrently');
$node->issues_sql_like(
[ 'reindexdb', '-v', '-t', 'test1', 'postgres' ],
diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl
index 7f3a9b14a91..b685b352828 100644
--- a/src/bin/scripts/t/100_vacuumdb.pl
+++ b/src/bin/scripts/t/100_vacuumdb.pl
@@ -96,16 +96,16 @@ $node->command_checks_all(
[qr/^WARNING.*cannot vacuum non-tables or special system tables/s],
'vacuumdb with view');
$node->command_fails(
- [ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '0',
- 'postgres'],
+ [ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '0', 'postgres' ],
'vacuumdb --min-mxid-age with incorrect value');
$node->command_fails(
- [ 'vacuumdb', '--table', 'vactable', '--min-xid-age', '0',
- 'postgres'],
+ [ 'vacuumdb', '--table', 'vactable', '--min-xid-age', '0', 'postgres' ],
'vacuumdb --min-xid-age with incorrect value');
$node->issues_sql_like(
- [ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '2147483000',
- 'postgres'],
+ [
+ 'vacuumdb', '--table', 'vactable', '--min-mxid-age',
+ '2147483000', 'postgres'
+ ],
qr/GREATEST.*relminmxid.*2147483000/,
'vacuumdb --table --min-mxid-age');
$node->issues_sql_like(