'postgres', q(
SELECT bt_index_check('bttest_unique_idx1', true, true);
));
-ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx1"/,
+like(
+ $stderr,
+ qr/index uniqueness is violated for index "bttest_unique_idx1"/,
'detected uniqueness violation for index "bttest_unique_idx1"');
#
'postgres', q(
SELECT bt_index_check('bttest_unique_idx2', true, true);
));
-ok( $stderr =~ /item order invariant violated for index "bttest_unique_idx2"/,
+like(
+ $stderr,
+ qr/item order invariant violated for index "bttest_unique_idx2"/,
'detected item order invariant violation for index "bttest_unique_idx2"');
$node->safe_psql(
'postgres', q(
SELECT bt_index_check('bttest_unique_idx2', true, true);
));
-ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx2"/,
+like(
+ $stderr,
+ qr/index uniqueness is violated for index "bttest_unique_idx2"/,
'detected uniqueness violation for index "bttest_unique_idx2"');
#
'postgres', q(
SELECT bt_index_check('bttest_unique_idx3', true, true);
));
-ok( $stderr =~ /item order invariant violated for index "bttest_unique_idx3"/,
+like(
+ $stderr,
+ qr/item order invariant violated for index "bttest_unique_idx3"/,
'detected item order invariant violation for index "bttest_unique_idx3"');
# For unique index deduplication is possible only for same values, but
'postgres', q(
SELECT bt_index_check('bttest_unique_idx3', true, true);
));
-ok( $stderr =~ /index uniqueness is violated for index "bttest_unique_idx3"/,
+like(
+ $stderr,
+ qr/index uniqueness is violated for index "bttest_unique_idx3"/,
'detected uniqueness violation for index "bttest_unique_idx3"');
$node->stop;
"SELECT relpages FROM pg_class
WHERE relname = 'corruption_test';"
);
-ok($npages >= 10, 'table has at least 10 pages');
+cmp_ok($npages, '>=', 10, 'table has at least 10 pages');
my $file = $node->safe_psql("postgres",
"SELECT pg_relation_filepath('corruption_test');");
'multiple --set options with different case');
my $conf = slurp_file("$tempdir/dataY/postgresql.conf");
-ok($conf !~ qr/^WORK_MEM = /m, "WORK_MEM should not be configured");
-ok($conf !~ qr/^Work_Mem = /m, "Work_Mem should not be configured");
-ok($conf =~ qr/^work_mem = 512/m, "work_mem should be in config");
+unlike($conf, qr/^WORK_MEM = /m, "WORK_MEM should not be configured");
+unlike($conf, qr/^Work_Mem = /m, "Work_Mem should not be configured");
+like($conf, qr/^work_mem = 512/m, "work_mem should be in config");
# Test the no-data-checksums flag
my $datadir_nochecksums = "$tempdir/data_no_checksums";
'SELECT system_identifier FROM pg_control_system()');
my $sysid_s = $node_s->safe_psql('postgres',
'SELECT system_identifier FROM pg_control_system()');
-ok($sysid_p != $sysid_s, 'system identifier was changed');
+isnt($sysid_p, $sysid_s, 'system identifier was changed');
# clean up
$node_p->teardown_node;
{
# Get the file's stat information of each segment
my $nlink_count = get_hard_link_count($segment);
- ok($nlink_count == 2, "File '$segment' has 2 hard links");
+ is($nlink_count, 2, "File '$segment' has 2 hard links");
}
# Get the file's stat information of the last segment
my $nlink_count = get_hard_link_count($last_segment);
- ok($nlink_count == $last_segment_nlinks,
+ is($nlink_count, $last_segment_nlinks,
"File '$last_segment' has $last_segment_nlinks hard link(s)");
}
if (($tests{$test}->{like}->{$test_key} || $tests{$test}->{all_runs})
&& !defined($tests{$test}->{unlike}->{$test_key}))
{
- if (!ok($output_file =~ $tests{$test}->{regexp},
+ if (!like(
+ $output_file, $tests{$test}->{regexp},
"$run: should dump $test"))
{
diag("Review $run results in $tempdir");
}
else
{
- if (!ok($output_file !~ $tests{$test}->{regexp},
+ if (!unlike(
+ $output_file, $tests{$test}->{regexp},
"$run: should not dump $test"))
{
diag("Review $run results in $tempdir");
my $dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "table one dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "table two dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "table three dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
+like($dump, qr/^CREATE TABLE public\.table_one/m, "table one dumped");
+like($dump, qr/^CREATE TABLE public\.table_two/m, "table two dumped");
+like($dump, qr/^CREATE TABLE public\.table_three/m, "table three dumped");
+like(
+ $dump,
+ qr/^CREATE TABLE public\.table_three_one/m,
"table three one dumped");
# Test various combinations of whitespace, comments and correct filters
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "dumped table one");
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
-ok($dump !~ qr/^CREATE TABLE public\.table_three/m, "table three not dumped");
-ok($dump !~ qr/^CREATE TABLE public\.table_three_one/m,
+like($dump, qr/^CREATE TABLE public\.table_one/m, "dumped table one");
+like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
+unlike(
+ $dump,
+ qr/^CREATE TABLE public\.table_three/m,
+ "table three not dumped");
+unlike(
+ $dump,
+ qr/^CREATE TABLE public\.table_three_one/m,
"table three_one not dumped");
-ok( $dump !~ qr/^COPY public\.table_one/m,
+unlike(
+ $dump,
+ qr/^COPY public\.table_one/m,
"content of table one is not included");
-ok($dump =~ qr/^COPY public\.table_two/m, "content of table two is included");
+like($dump, qr/^COPY public\.table_two/m, "content of table two is included");
# Test dumping tables specified by qualified names
open $inputfile, '>', "$tempdir/inputfile.txt"
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "dumped table one");
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
-ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
+like($dump, qr/^CREATE TABLE public\.table_one/m, "dumped table one");
+like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
+like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
# Test dumping all tables except one
open $inputfile, '>', "$tempdir/inputfile.txt"
$dump = slurp_file($plainfile);
-ok($dump !~ qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "dumped table two");
-ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
-ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
+unlike($dump, qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
+like($dump, qr/^CREATE TABLE public\.table_two/m, "dumped table two");
+like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
+like(
+ $dump,
+ qr/^CREATE TABLE public\.table_three_one/m,
"dumped table three_one");
# Test dumping tables with a wildcard pattern
$dump = slurp_file($plainfile);
-ok($dump !~ qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
-ok($dump !~ qr/^CREATE TABLE public\.table_two/m, "table two not dumped");
-ok($dump =~ qr/^CREATE TABLE public\.table_three/m, "dumped table three");
-ok($dump =~ qr/^CREATE TABLE public\.table_three_one/m,
+unlike($dump, qr/^CREATE TABLE public\.table_one/m, "table one not dumped");
+unlike($dump, qr/^CREATE TABLE public\.table_two/m, "table two not dumped");
+like($dump, qr/^CREATE TABLE public\.table_three/m, "dumped table three");
+like(
+ $dump,
+ qr/^CREATE TABLE public\.table_three_one/m,
"dumped table three_one");
# Test dumping table with multiline quoted tablename
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public.\"strange aaa/m,
+like(
+ $dump,
+ qr/^CREATE TABLE public.\"strange aaa/m,
"dump table with new line in name");
# Test excluding multiline quoted tablename from dump
$dump = slurp_file($plainfile);
-ok($dump !~ qr/^CREATE TABLE public.\"strange aaa/m,
+unlike(
+ $dump,
+ qr/^CREATE TABLE public.\"strange aaa/m,
"dump table with new line in name");
# Test excluding an entire schema
$dump = slurp_file($plainfile);
-ok($dump !~ qr/^CREATE TABLE/m, "no table dumped");
+unlike($dump, qr/^CREATE TABLE/m, "no table dumped");
# Test including and excluding an entire schema by multiple filterfiles
open $inputfile, '>', "$tempdir/inputfile.txt"
$dump = slurp_file($plainfile);
-ok($dump !~ qr/^CREATE TABLE/m, "no table dumped");
+unlike($dump, qr/^CREATE TABLE/m, "no table dumped");
# Test dumping a table with a single leading newline on a row
open $inputfile, '>', "$tempdir/inputfile.txt"
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
+like(
+ $dump,
+ qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
"dump table with multiline strange name");
open $inputfile, '>', "$tempdir/inputfile.txt"
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
+like(
+ $dump,
+ qr/^CREATE TABLE public.\"\nt\nt\n\" \($/ms,
"dump table with multiline strange name");
#########################################
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE SERVER dummyserver/m, "dump foreign server");
+like($dump, qr/^CREATE SERVER dummyserver/m, "dump foreign server");
open $inputfile, '>', "$tempdir/inputfile.txt"
or die "unable to open filterfile for writing";
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public\.table_one/m, "no table dumped");
+like($dump, qr/^CREATE TABLE public\.table_one/m, "no table dumped");
# Now append a pattern to the filter file which doesn't resolve
open $inputfile, '>>', "$tempdir/inputfile.txt"
$dump = slurp_file($plainfile);
-ok($dump !~ qr/^\\connect postgres/m, "database postgres is not dumped");
-ok($dump =~ qr/^\\connect template1/m, "database template1 is dumped");
+unlike($dump, qr/^\\connect postgres/m, "database postgres is not dumped");
+like($dump, qr/^\\connect template1/m, "database template1 is dumped");
# Make sure this option dont break the existing limitation of using
# --globals-only with exclusions
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public\.table_two/m, "wanted table restored");
-ok($dump !~ qr/^CREATE TABLE public\.table_one/m,
+like($dump, qr/^CREATE TABLE public\.table_two/m, "wanted table restored");
+unlike(
+ $dump,
+ qr/^CREATE TABLE public\.table_one/m,
"unwanted table is not restored");
open $inputfile, '>', "$tempdir/inputfile.txt"
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE FUNCTION public\.foo1/m, "wanted function restored");
-ok( $dump !~ qr/^CREATE TABLE public\.foo2/m,
+like($dump, qr/^CREATE FUNCTION public\.foo1/m, "wanted function restored");
+unlike(
+ $dump,
+ qr/^CREATE TABLE public\.foo2/m,
"unwanted function is not restored");
# this should be white space tolerant (against the -P argument)
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE FUNCTION public\.foo3/m, "wanted function restored");
+like($dump, qr/^CREATE FUNCTION public\.foo3/m, "wanted function restored");
open $inputfile, '>', "$tempdir/inputfile.txt"
or die "unable to open filterfile for writing";
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE INDEX t1_idx1/m, "wanted index restored");
-ok($dump !~ qr/^CREATE INDEX t2_idx2/m, "unwanted index are not restored");
-ok($dump =~ qr/^CREATE TRIGGER trg1/m, "wanted trigger restored");
-ok($dump !~ qr/^CREATE TRIGGER trg2/m, "unwanted trigger is not restored");
+like($dump, qr/^CREATE INDEX t1_idx1/m, "wanted index restored");
+unlike($dump, qr/^CREATE INDEX t2_idx2/m, "unwanted index is not restored");
+like($dump, qr/^CREATE TRIGGER trg1/m, "wanted trigger restored");
+unlike($dump, qr/^CREATE TRIGGER trg2/m, "unwanted trigger is not restored");
open $inputfile, '>', "$tempdir/inputfile.txt"
or die "unable to open filterfile for writing";
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE s1\.t1/m, "wanted table from schema restored");
-ok( $dump =~ qr/^CREATE SEQUENCE s1\.s1/m,
+like($dump, qr/^CREATE TABLE s1\.t1/m, "wanted table from schema restored");
+like(
+ $dump,
+ qr/^CREATE SEQUENCE s1\.s1/m,
"wanted sequence from schema restored");
-ok($dump !~ qr/^CREATE TABLE s2\t2/m, "unwanted table is not restored");
+unlike($dump, qr/^CREATE TABLE s2\.t2/m, "unwanted table is not restored");
open $inputfile, '>', "$tempdir/inputfile.txt"
or die "unable to open filterfile for writing";
$dump = slurp_file($plainfile);
-ok($dump !~ qr/^CREATE TABLE s1\.t1/m,
+unlike(
+ $dump,
+ qr/^CREATE TABLE s1\.t1/m,
"unwanted table from schema is not restored");
-ok($dump !~ qr/^CREATE SEQUENCE s1\.s1/m,
+unlike(
+ $dump,
+ qr/^CREATE SEQUENCE s1\.s1/m,
"unwanted sequence from schema is not restored");
-ok($dump =~ qr/^CREATE TABLE s2\.t2/m, "wanted table restored");
-ok($dump =~ qr/^CREATE TABLE public\.t1/m, "wanted table restored");
+like($dump, qr/^CREATE TABLE s2\.t2/m, "wanted table restored");
+like($dump, qr/^CREATE TABLE public\.t1/m, "wanted table restored");
#########################################
# test of supported syntax
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public\.bootab/m, "dumped children table");
+like($dump, qr/^CREATE TABLE public\.bootab/m, "dumped children table");
open $inputfile, '>', "$tempdir/inputfile.txt"
or die "unable to open filterfile for writing";
$dump = slurp_file($plainfile);
-ok($dump !~ qr/^CREATE TABLE public\.bootab/m,
+unlike(
+ $dump,
+ qr/^CREATE TABLE public\.bootab/m,
"exclude dumped children table");
open $inputfile, '>', "$tempdir/inputfile.txt"
$dump = slurp_file($plainfile);
-ok($dump =~ qr/^CREATE TABLE public\.bootab/m, "dumped children table");
-ok($dump !~ qr/^COPY public\.bootab/m, "exclude dumped children table");
+like($dump, qr/^CREATE TABLE public\.bootab/m, "dumped children table");
+unlike($dump, qr/^COPY public\.bootab/m, "exclude dumped children table");
#########################################
# Test extension
'SELECT seed, rand, val, COUNT(*) FROM seeded_random GROUP BY seed, rand, val'
);
-ok($ret == 0, "psql seeded_random count ok");
-ok($err eq '', "psql seeded_random count stderr is empty");
-ok($out =~ /\b$seed\|uniform\|1\d\d\d\|2/,
+is($ret, 0, "psql seeded_random count ok");
+is($err, '', "psql seeded_random count stderr is empty");
+like(
+ $out,
+ qr/\b$seed\|uniform\|1\d\d\d\|2/,
"psql seeded_random count uniform");
-ok( $out =~ /\b$seed\|exponential\|2\d\d\d\|2/,
+like(
+ $out,
+ qr/\b$seed\|exponential\|2\d\d\d\|2/,
"psql seeded_random count exponential");
-ok( $out =~ /\b$seed\|gaussian\|3\d\d\d\|2/,
+like(
+ $out,
+ qr/\b$seed\|gaussian\|3\d\d\d\|2/,
"psql seeded_random count gaussian");
-ok($out =~ /\b$seed\|zipfian\|4\d\d\d\|2/,
+like(
+ $out,
+ qr/\b$seed\|zipfian\|4\d\d\d\|2/,
"psql seeded_random count zipfian");
$node->safe_psql('postgres', 'DROP TABLE seeded_random;');
# $prefix is simple enough, thus does not need escaping
my @logs = list_files($dir, qr{^$prefix\..*$});
- ok(@logs == $nb, "number of log files");
- ok(grep(/\/$prefix\.\d+(\.\d+)?$/, @logs) == $nb, "file name format");
+ is(scalar(@logs), $nb, "number of log files");
+ is(scalar(grep(/\/$prefix\.\d+(\.\d+)?$/, @logs)),
+ $nb, "file name format");
my $log_number = 0;
for my $log (sort @logs)
my @contents = split(/\n/, $contents_raw);
my $clen = @contents;
- ok( $min <= $clen && $clen <= $max,
- "transaction count for $log ($clen)");
+ cmp_ok($clen, '>=', $min,
+ "transaction count for $log ($clen) is above min");
+ cmp_ok($clen, '<=', $max,
+ "transaction count for $log ($clen) is below max");
my $clen_match = grep(/$re/, @contents);
- ok($clen_match == $clen, "transaction format for $prefix");
+ is($clen_match, $clen, "transaction format for $prefix");
# Show more information if some logs don't match
# to help with debugging.
is($ret, 2, 'server crash: psql exit code');
like($out, qr/before/, 'server crash: output before crash');
-ok($out !~ qr/AFTER/, 'server crash: no output after crash');
+unlike($out, qr/AFTER/, 'server crash: no output after crash');
is( $err,
'psql:<stdin>:2: FATAL: terminating connection due to administrator command
psql:<stdin>:2: server closed the connection unexpectedly
'create database with owner role_foobar');
($ret, $stdout, $stderr) =
$node->psql('foobar2', 'DROP OWNED BY role_foobar;', on_error_die => 1,);
-ok($ret == 0, "DROP OWNED BY role_foobar");
+is($ret, 0, "DROP OWNED BY role_foobar");
($ret, $stdout, $stderr) =
$node->psql('foobar2', 'DROP DATABASE foobar8;', on_error_die => 1,);
-ok($ret == 0, "DROP DATABASE foobar8");
+is($ret, 0, "DROP DATABASE foobar8");
done_testing();
my $total_occurrences =
$node1_occurrences + $node2_occurrences + $node3_occurrences;
-ok($node1_occurrences > 1, "received at least one connection on node1");
-ok($node2_occurrences > 1, "received at least one connection on node2");
-ok($node3_occurrences > 1, "received at least one connection on node3");
-ok($total_occurrences == 50, "received 50 connections across all nodes");
+cmp_ok($node1_occurrences, '>', 1,
+ "received at least one connection on node1");
+cmp_ok($node2_occurrences, '>', 1,
+ "received at least one connection on node2");
+cmp_ok($node3_occurrences, '>', 1,
+ "received at least one connection on node3");
+is($total_occurrences, 50, "received 50 connections across all nodes");
$node1->stop();
$node2->stop();
my $total_occurrences =
$node1_occurrences + $node2_occurrences + $node3_occurrences;
-ok($node1_occurrences > 1, "received at least one connection on node1");
-ok($node2_occurrences > 1, "received at least one connection on node2");
-ok($node3_occurrences > 1, "received at least one connection on node3");
-ok($total_occurrences == 50, "received 50 connections across all nodes");
+cmp_ok($node1_occurrences, '>', 1,
+ "received at least one connection on node1");
+cmp_ok($node2_occurrences, '>', 1,
+ "received at least one connection on node2");
+cmp_ok($node3_occurrences, '>', 1,
+ "received at least one connection on node3");
+is($total_occurrences, 50, "received 50 connections across all nodes");
$node1->stop();
$node2->stop();
if ($expect_failure)
{
- ok( $stderr =~
- /$worker_count is outside the valid range for parameter "io_workers"/,
+ like(
+ $stderr,
+ qr/$worker_count is outside the valid range for parameter "io_workers"/,
"updating number of io_workers to $worker_count failed, as expected"
);
# normal run will verify table data
$output = run_sql_command('alter table atacc1 alter test_a set not null;');
ok(!is_table_verified($output), 'with constraint will not scan table');
-ok( $output =~
- m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
+like(
+ $output,
+ qr/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
'test_a proved by constraints');
run_sql_command('alter table atacc1 alter test_a drop not null;');
);
ok(is_table_verified($output), 'table was scanned');
# we may miss debug message for test_a constraint because we need verify table due test_b
-ok( !( $output =~
- m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/
- ),
+unlike(
+ $output,
+ qr/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
'test_b not proved by wrong constraints');
run_sql_command(
'alter table atacc1 alter test_a drop not null, alter test_b drop not null;'
'alter table atacc1 alter test_b set not null, alter test_a set not null;'
);
ok(!is_table_verified($output), 'table was not scanned for both columns');
-ok( $output =~
- m/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
+like(
+ $output,
+ qr/existing constraints on column "atacc1.test_a" are sufficient to prove that it does not contain nulls/,
'test_a proved by constraints');
-ok( $output =~
- m/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
+like(
+ $output,
+ qr/existing constraints on column "atacc1.test_b" are sufficient to prove that it does not contain nulls/,
'test_b proved by constraints');
run_sql_command('drop table atacc1;');
'ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);'
);
ok(!is_table_verified($output), 'table part_3_4 not scanned');
-ok( $output =~
- m/partition constraint for table "part_3_4" is implied by existing constraints/,
+like(
+ $output,
+ qr/partition constraint for table "part_3_4" is implied by existing constraints/,
'part_3_4 verified by existing constraints');
# test attach default partition
$output = run_sql_command(
'ALTER TABLE list_parted2 ATTACH PARTITION list_parted2_def default;');
ok(!is_table_verified($output), 'table list_parted2_def not scanned');
-ok( $output =~
- m/partition constraint for table "list_parted2_def" is implied by existing constraints/,
+like(
+ $output,
+ qr/partition constraint for table "list_parted2_def" is implied by existing constraints/,
'list_parted2_def verified by existing constraints');
$output = run_sql_command(
'CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66);'
);
ok(!is_table_verified($output), 'table list_parted2_def not scanned');
-ok( $output =~
- m/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
+like(
+ $output,
+ qr/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
'updated partition constraint for default partition list_parted2_def');
# test attach another partitioned table
);
$output = run_sql_command(
'ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);');
-ok(!($output =~ m/verifying table "part_5"/), 'table part_5 not scanned');
-ok($output =~ m/verifying table "list_parted2_def"/,
+unlike($output, qr/verifying table "part_5"/, 'table part_5 not scanned');
+like(
+ $output,
+ qr/verifying table "list_parted2_def"/,
'list_parted2_def scanned');
-ok( $output =~
- m/partition constraint for table "part_5" is implied by existing constraints/,
+like(
+ $output,
+ qr/partition constraint for table "part_5" is implied by existing constraints/,
'part_5 verified by existing constraints');
run_sql_command(
);
$output = run_sql_command(
'ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);');
-ok(!($output =~ m/verifying table "part_5"/), 'table part_5 not scanned');
-ok($output =~ m/verifying table "list_parted2_def"/,
+unlike($output, qr/verifying table "part_5"/, 'table part_5 not scanned');
+like(
+ $output,
+ qr/verifying table "list_parted2_def"/,
'list_parted2_def scanned');
-ok( $output =~
- m/partition constraint for table "part_5" is implied by existing constraints/,
+like(
+ $output,
+ qr/partition constraint for table "part_5" is implied by existing constraints/,
'part_5 verified by existing constraints');
# Check the case where attnos of the partitioning columns in the table being
ALTER TABLE part_6 DROP c;');
$output = run_sql_command(
'ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6);');
-ok(!($output =~ m/verifying table "part_6"/), 'table part_6 not scanned');
-ok($output =~ m/verifying table "list_parted2_def"/,
+unlike($output, qr/verifying table "part_6"/, 'table part_6 not scanned');
+like(
+ $output,
+ qr/verifying table "list_parted2_def"/,
'list_parted2_def scanned');
-ok( $output =~
- m/partition constraint for table "part_6" is implied by existing constraints/,
+like(
+ $output,
+ qr/partition constraint for table "part_6" is implied by existing constraints/,
'part_6 verified by existing constraints');
# Similar to above, but the table being attached is a partitioned table
'ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN (\'a\', null);'
);
ok(!is_table_verified($output), 'table not scanned');
-ok( $output =~
- m/partition constraint for table "part_7_a_null" is implied by existing constraints/,
+like(
+ $output,
+ qr/partition constraint for table "part_7_a_null" is implied by existing constraints/,
'part_7_a_null verified by existing constraints');
$output = run_sql_command(
'ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);');
ok(!is_table_verified($output), 'tables not scanned');
-ok( $output =~
- m/partition constraint for table "part_7" is implied by existing constraints/,
+like(
+ $output,
+ qr/partition constraint for table "part_7" is implied by existing constraints/,
'part_7 verified by existing constraints');
-ok( $output =~
- m/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
+like(
+ $output,
+ qr/updated partition constraint for default partition "list_parted2_def" is implied by existing constraints/,
'updated partition constraint for default partition list_parted2_def');
run_sql_command(
'ALTER TABLE range_parted ATTACH PARTITION range_part1 FOR VALUES FROM (1, 1) TO (1, 10);'
);
ok(is_table_verified($output), 'table range_part1 scanned');
-ok( !( $output =~
- m/partition constraint for table "range_part1" is implied by existing constraints/
- ),
+unlike(
+ $output,
+ qr/partition constraint for table "range_part1" is implied by existing constraints/,
'range_part1 not verified by existing constraints');
run_sql_command(
'ALTER TABLE range_parted ATTACH PARTITION range_part2 FOR VALUES FROM (1, 10) TO (1, 20);'
);
ok(!is_table_verified($output), 'table range_part2 not scanned');
-ok( $output =~
- m/partition constraint for table "range_part2" is implied by existing constraints/,
+like(
+ $output,
+ qr/partition constraint for table "range_part2" is implied by existing constraints/,
'range_part2 verified by existing constraints');
# If a partitioned table being created or an existing table being attached
$output = run_sql_command(
'ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1);');
ok(is_table_verified($output), 'quuux1 table scanned');
-ok( !( $output =~
- m/partition constraint for table "quuux1" is implied by existing constraints/
- ),
+unlike(
+ $output,
+ qr/partition constraint for table "quuux1" is implied by existing constraints/,
'quuux1 verified by existing constraints');
run_sql_command('CREATE TABLE quuux2 (a int, b text);');
$output = run_sql_command(
'ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2);');
-ok(!($output =~ m/verifying table "quuux_default1"/),
+unlike(
+ $output,
+ qr/verifying table "quuux_default1"/,
'quuux_default1 not scanned');
-ok($output =~ m/verifying table "quuux2"/, 'quuux2 scanned');
-ok( $output =~
- m/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
+like($output, qr/verifying table "quuux2"/, 'quuux2 scanned');
+like(
+ $output,
+ qr/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
'updated partition constraint for default partition quuux_default1');
run_sql_command('DROP TABLE quuux1, quuux2;');
$output = run_sql_command(
'CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1);');
ok(!is_table_verified($output), 'tables not scanned');
-ok( !( $output =~
- m/partition constraint for table "quuux1" is implied by existing constraints/
- ),
+unlike(
+ $output,
+ qr/partition constraint for table "quuux1" is implied by existing constraints/,
'quuux1 verified by existing constraints');
$output = run_sql_command(
'CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2);');
ok(!is_table_verified($output), 'tables not scanned');
-ok( $output =~
- m/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
+like(
+ $output,
+ qr/updated partition constraint for default partition "quuux_default1" is implied by existing constraints/,
'updated partition constraint for default partition quuux_default1');
run_sql_command('DROP TABLE quuux;');
# Create a tablespace with an absolute path
$result = $node->psql('postgres',
"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
-ok($result == 0, 'create tablespace with absolute path');
+is($result, 0, 'create tablespace with absolute path');
# Can't create a tablespace where there is one already
$result = $node->psql('postgres',
"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
-ok($result != 0, 'clobber tablespace with absolute path');
+isnt($result, 0, 'clobber tablespace with absolute path');
# Create table in it
$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
-ok($result == 0, 'create table in tablespace with absolute path');
+is($result, 0, 'create table in tablespace with absolute path');
# Can't drop a tablespace that still has a table in it
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
-ok($result != 0, 'drop tablespace with absolute path');
+isnt($result, 0, 'drop tablespace with absolute path');
# Drop the table
$result = $node->psql('postgres', "DROP TABLE t");
-ok($result == 0, 'drop table in tablespace with absolute path');
+is($result, 0, 'drop table in tablespace with absolute path');
# Drop the tablespace
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
-ok($result == 0, 'drop tablespace with absolute path');
+is($result, 0, 'drop tablespace with absolute path');
# Create two absolute tablespaces and two in-place tablespaces, so we can
# testing various kinds of tablespace moves.
$result = $node->psql('postgres',
"CREATE TABLESPACE regress_ts1 LOCATION '$TS1_LOCATION'");
-ok($result == 0, 'create tablespace 1 with absolute path');
+is($result, 0, 'create tablespace 1 with absolute path');
$result = $node->psql('postgres',
"CREATE TABLESPACE regress_ts2 LOCATION '$TS2_LOCATION'");
-ok($result == 0, 'create tablespace 2 with absolute path');
+is($result, 0, 'create tablespace 2 with absolute path');
$result = $node->psql('postgres',
"SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''"
);
-ok($result == 0, 'create tablespace 3 with in-place directory');
+is($result, 0, 'create tablespace 3 with in-place directory');
$result = $node->psql('postgres',
"SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''"
);
-ok($result == 0, 'create tablespace 4 with in-place directory');
+is($result, 0, 'create tablespace 4 with in-place directory');
# Create a table and test moving between absolute and in-place tablespaces
$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
-ok($result == 0, 'create table in tablespace 1');
+is($result, 0, 'create table in tablespace 1');
$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts2");
-ok($result == 0, 'move table abs->abs');
+is($result, 0, 'move table abs->abs');
$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts3");
-ok($result == 0, 'move table abs->in-place');
+is($result, 0, 'move table abs->in-place');
$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts4");
-ok($result == 0, 'move table in-place->in-place');
+is($result, 0, 'move table in-place->in-place');
$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts1");
-ok($result == 0, 'move table in-place->abs');
+is($result, 0, 'move table in-place->abs');
# Drop everything
$result = $node->psql('postgres', "DROP TABLE t");
-ok($result == 0, 'create table in tablespace 1');
+is($result, 0, 'drop table in tablespace 1');
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
-ok($result == 0, 'drop tablespace 1');
+is($result, 0, 'drop tablespace 1');
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts2");
-ok($result == 0, 'drop tablespace 2');
+is($result, 0, 'drop tablespace 2');
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts3");
-ok($result == 0, 'drop tablespace 3');
+is($result, 0, 'drop tablespace 3');
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts4");
-ok($result == 0, 'drop tablespace 4');
+is($result, 0, 'drop tablespace 4');
$node->stop;
if ($tests{$test}->{like}->{$test_key}
&& !defined($tests{$test}->{unlike}->{$test_key}))
{
- if (!ok($output_file =~ $tests{$test}->{regexp},
+ if (!like(
+ $output_file, $tests{$test}->{regexp},
"$run: should dump $test"))
{
diag("Review $run results in $tempdir");
}
else
{
- if (!ok($output_file !~ $tests{$test}->{regexp},
+ if (!unlike(
+ $output_file, $tests{$test}->{regexp},
"$run: should not dump $test"))
{
diag("Review $run results in $tempdir");
last;
}
}
-ok($warn_limit == 1, "warn-limit reached");
+is($warn_limit, 1, "warn-limit reached");
# We can still INSERT, despite the warnings.
$node->safe_psql('postgres',
'postgres', 'SHOW ALL;',
on_error_die => 1,
extra_params => [ '--dbname' => $connstr_rep ]);
-ok($ret == 0, "SHOW ALL with replication role and physical replication");
+is($ret, 0, "SHOW ALL with replication role and physical replication");
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW ALL;',
on_error_die => 1,
extra_params => [ '--dbname' => $connstr_db ]);
-ok($ret == 0, "SHOW ALL with replication role and logical replication");
+is($ret, 0, "SHOW ALL with replication role and logical replication");
# Test SHOW with a user-settable parameter
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW work_mem;',
on_error_die => 1,
extra_params => [ '--dbname' => $connstr_rep ]);
-ok( $ret == 0,
+is($ret, 0,
"SHOW with user-settable parameter, replication role and physical replication"
);
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW work_mem;',
on_error_die => 1,
extra_params => [ '--dbname' => $connstr_db ]);
-ok( $ret == 0,
+is($ret, 0,
"SHOW with user-settable parameter, replication role and logical replication"
);
'postgres', 'SHOW primary_conninfo;',
on_error_die => 1,
extra_params => [ '--dbname' => $connstr_rep ]);
-ok( $ret == 0,
+is($ret, 0,
"SHOW with superuser-settable parameter, replication role and physical replication"
);
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW primary_conninfo;',
on_error_die => 1,
extra_params => [ '--dbname' => $connstr_db ]);
-ok( $ret == 0,
+is($ret, 0,
"SHOW with superuser-settable parameter, replication role and logical replication"
);
'postgres',
'READ_REPLICATION_SLOT non_existent_slot;',
extra_params => [ '--dbname' => $connstr_rep ]);
-ok($ret == 0, "READ_REPLICATION_SLOT exit code 0 on success");
+is($ret, 0, "READ_REPLICATION_SLOT exit code 0 on success");
like($stdout, qr/^\|\|$/,
"READ_REPLICATION_SLOT returns NULL values if slot does not exist");
'postgres',
"READ_REPLICATION_SLOT $slotname;",
extra_params => [ '--dbname' => $connstr_rep ]);
-ok($ret == 0, "READ_REPLICATION_SLOT success with existing slot");
+is($ret, 0, "READ_REPLICATION_SLOT success with existing slot");
like($stdout, qr/^physical\|[^|]*\|1$/,
"READ_REPLICATION_SLOT returns tuple with slot information");
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
);
chomp($phys_restart_lsn_post);
-ok( ($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0,
+is($phys_restart_lsn_pre, $phys_restart_lsn_post,
"physical slot advance persists across restarts");
# Check if the previous segment gets correctly recycled after the
ok(!$res, 'invalid recovery startup fails');
my $logfile = slurp_file($node_standby->logfile());
-ok($logfile =~ qr/multiple recovery targets specified/,
+like(
+ $logfile,
+ qr/multiple recovery targets specified/,
'multiple conflicting settings');
# Check behavior when recovery ends before target is reached
usleep(100_000);
}
$logfile = slurp_file($node_standby->logfile());
-ok( $logfile =~
- qr/FATAL: .* recovery ended before configured recovery target was reached/,
+like(
+ $logfile,
+ qr/FATAL: .* recovery ended before configured recovery target was reached/,
'recovery end before target reached is a fatal error');
# Invalid timeline target
# This test is successful if and only if the LSN has been applied with at least
# the configured apply delay.
-ok(time() - $primary_insert_time >= $delay,
- "standby applies WAL only after replication delay");
-
-
+cmp_ok(time() - $primary_insert_time,
+ '>=', $delay, "standby applies WAL only after replication delay");
# Check that recovery can be paused or resumed expectedly.
my $node_standby2 = PostgreSQL::Test::Cluster->new('standby2');
$node_standby2->init_from_backup($node_primary, $backup_name,
'template1',
qq[START_REPLICATION SLOT test_slot LOGICAL 0/0],
replication => 'database');
-ok( $stderr =~
- m/replication slot "test_slot" was not created in this database/,
+like(
+ $stderr,
+ qr/replication slot "test_slot" was not created in this database/,
"Logical decoding correctly fails to start");
($result, $stdout, $stderr) = $node_primary->psql(
'template1',
qq[START_REPLICATION SLOT s1 LOGICAL 0/1],
replication => 'true');
-ok($stderr =~ /ERROR: logical decoding requires a database connection/,
+like(
+ $stderr,
+ qr/ERROR: logical decoding requires a database connection/,
"Logical decoding fails on non-database connection");
$node_primary->safe_psql('postgres',
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"
);
chomp($logical_restart_lsn_post);
-ok(($logical_restart_lsn_pre cmp $logical_restart_lsn_post) == 0,
+is($logical_restart_lsn_pre, $logical_restart_lsn_post,
"logical slot advance persists across restarts");
my $stats_test_slot1 = 'test_slot';
$standby2->stop;
my $logfile = slurp_file($standby2->logfile, $log_location);
-ok( $logfile =~ qr/archiver process shutting down/,
+like(
+ $logfile,
+ qr/archiver process shutting down/,
'check shutdown callback of shell archive module');
# Test that we can enter and leave backup mode without crashes
# Confirm that the archive recovery fails with an expected error
my $logfile = slurp_file($recovery_node->logfile());
- ok( $logfile =~
- qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/,
+ like(
+ $logfile,
+ qr/FATAL: .* WAL was generated with "wal_level=minimal", cannot continue recovering/,
"$node_text ends with an error because it finds WAL generated with \"wal_level=minimal\""
);
}
# Confirm that the server startup fails with an expected error
my $logfile = slurp_file($node_standby->logfile());
-ok( $logfile =~
- qr/FATAL: .* logical replication slot ".*" exists on the standby, but "hot_standby" = "off"/,
+like(
+ $logfile,
+ qr/FATAL: .* logical replication slot ".*" exists on the standby, but "hot_standby" = "off"/,
"the standby ends with an error during startup because hot_standby was disabled"
);
$node_standby->adjust_conf('postgresql.conf', 'hot_standby', 'on');
($result, $stdout, $stderr) = $node_standby->psql('otherdb',
"SELECT lsn FROM pg_logical_slot_peek_changes('behaves_ok_activeslot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
);
-ok( $stderr =~
- m/replication slot "behaves_ok_activeslot" was not created in this database/,
+like(
+ $stderr,
+ qr/replication slot "behaves_ok_activeslot" was not created in this database/,
"replaying logical slot from another database fails");
##################################################
'postgres',
qq[select pg_copy_logical_replication_slot('vacuum_full_inactiveslot', 'vacuum_full_inactiveslot_copy');],
replication => 'database');
-ok( $stderr =~
- /ERROR: cannot copy invalidated replication slot "vacuum_full_inactiveslot"/,
+like(
+ $stderr,
+ qr/ERROR: cannot copy invalidated replication slot "vacuum_full_inactiveslot"/,
"invalidated slot cannot be copied");
# Set hot_standby_feedback to on
# Disable failover for enabled subscription
my ($result, $stdout, $stderr) = $subscriber1->psql('postgres',
"ALTER SUBSCRIPTION regress_mysub1 SET (failover = false)");
-ok( $stderr =~
- /ERROR: cannot set option "failover" for enabled subscription/,
+like(
+ $stderr,
+ qr/ERROR: cannot set option "failover" for enabled subscription/,
"altering failover is not allowed for enabled subscription");
##################################################
($result, $stdout, $stderr) =
$publisher->psql('postgres', "SELECT pg_sync_replication_slots();");
-ok( $stderr =~
- /ERROR: replication slots can only be synchronized to a standby server/,
+like(
+ $stderr,
+ qr/ERROR: replication slots can only be synchronized to a standby server/,
"cannot sync slots on a non-standby server");
##################################################
# Attempting to perform logical decoding on a synced slot should result in an error
($result, $stdout, $stderr) = $standby1->psql('postgres',
"select * from pg_logical_slot_get_changes('lsub1_slot', NULL, NULL);");
-ok( $stderr =~
- /ERROR: cannot use replication slot "lsub1_slot" for logical decoding/,
+like(
+ $stderr,
+ qr/ERROR: cannot use replication slot "lsub1_slot" for logical decoding/,
"logical decoding is not allowed on synced slot");
# Attempting to alter a synced slot should result in an error
'postgres',
qq[ALTER_REPLICATION_SLOT lsub1_slot (failover);],
replication => 'database');
-ok($stderr =~ /ERROR: cannot alter replication slot "lsub1_slot"/,
+like(
+ $stderr,
+ qr/ERROR: cannot alter replication slot "lsub1_slot"/,
"synced slot on standby cannot be altered");
# Attempting to drop a synced slot should result in an error
($result, $stdout, $stderr) = $standby1->psql('postgres',
"SELECT pg_drop_replication_slot('lsub1_slot');");
-ok($stderr =~ /ERROR: cannot drop replication slot "lsub1_slot"/,
+like(
+ $stderr,
+ qr/ERROR: cannot drop replication slot "lsub1_slot"/,
"synced slot on standby cannot be dropped");
##################################################
($result, $stdout, $stderr) =
$standby1->psql('postgres', "SELECT pg_sync_replication_slots();");
-ok( $stderr =~
- /ERROR: replication slot synchronization requires "dbname" to be specified in "primary_conninfo"/,
+like(
+ $stderr,
+ qr/ERROR: replication slot synchronization requires "dbname" to be specified in "primary_conninfo"/,
"cannot sync slots if dbname is not specified in primary_conninfo");
# Add the dbname back to the primary_conninfo for further tests
($result, $stdout, $stderr) =
$cascading_standby->psql('postgres', "SELECT pg_sync_replication_slots();");
-ok( $stderr =~
- /ERROR: cannot synchronize replication slots from a standby server/,
+like(
+ $stderr,
+ qr/ERROR: cannot synchronize replication slots from a standby server/,
"cannot sync slots to a cascading standby server");
$cascading_standby->stop;
'postgres', qq[
SELECT pg_replication_slot_advance('logical_slot', '0/1');
]);
-ok( $stderr =~ /can no longer access replication slot "logical_slot"/,
+like(
+ $stderr,
+ qr/can no longer access replication slot "logical_slot"/,
"detected error upon trying to acquire invalidated slot on node")
or die
"could not detect error upon trying to acquire invalidated slot \"logical_slot\" on node";
$node_publisher->wait_for_catchup('tap_sub');
my $logfile = slurp_file($node_subscriber->logfile, $log_location);
-ok( $logfile =~
- qr/conflict detected on relation "public.tab_full_pk": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(1, quux\); replica identity \(a\)=\(1\)/m,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab_full_pk": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(1, quux\); replica identity \(a\)=\(1\)/m,
'update target row is missing');
-ok( $logfile =~
- qr/conflict detected on relation "public.tab_full": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(26\); replica identity full \(25\)/m,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab_full": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(26\); replica identity full \(25\)/m,
'update target row is missing');
-ok( $logfile =~
- qr/conflict detected on relation "public.tab_full_pk": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(2\)/m,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab_full_pk": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(2\)/m,
'delete target row is missing');
$node_subscriber->append_conf('postgresql.conf',
$node_publisher->wait_for_catchup('tap_sub');
$logfile = slurp_file($node_publisher->logfile, $log_location);
-ok($logfile =~ qr/skipped replication of an empty transaction with XID/,
+like(
+ $logfile,
+ qr/skipped replication of an empty transaction with XID/,
'empty transaction is skipped');
$result =
CREATE PUBLICATION tap_pub2 FOR TABLE skip_wal;
ROLLBACK;
});
-ok( $reterr =~
- m/WARNING: "wal_level" is insufficient to publish logical changes/,
+like(
+ $reterr,
+ qr/WARNING: "wal_level" is insufficient to publish logical changes/,
'CREATE PUBLICATION while "wal_level=minimal"');
done_testing();
my ($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
"CREATE SUBSCRIPTION mysub1 CONNECTION '$publisher_connstr' PUBLICATION mypub, non_existent_pub"
);
-ok( $stderr =~
- m/WARNING: publication "non_existent_pub" does not exist on the publisher/,
+like(
+ $stderr,
+ qr/WARNING: publication "non_existent_pub" does not exist on the publisher/,
"Create subscription throws warning for non-existent publication");
# Wait for initial table sync to finish.
($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
"ALTER SUBSCRIPTION mysub1 ADD PUBLICATION non_existent_pub1, non_existent_pub2"
);
-ok( $stderr =~
- m/WARNING: publications "non_existent_pub1", "non_existent_pub2" do not exist on the publisher/,
+like(
+ $stderr,
+ qr/WARNING: publications "non_existent_pub1", "non_existent_pub2" do not exist on the publisher/,
"Alter subscription add publication throws warning for non-existent publications"
);
# Specifying non-existent publication along with set publication.
($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
"ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub");
-ok( $stderr =~
- m/WARNING: publication "non_existent_pub" does not exist on the publisher/,
+like(
+ $stderr,
+ qr/WARNING: publication "non_existent_pub" does not exist on the publisher/,
"Alter subscription set publication throws warning for non-existent publication"
);
$node_publisher->wait_for_catchup('sub2');
my $logfile = slurp_file($node_subscriber1->logfile(), $log_location);
-ok( $logfile =~
- qr/conflict detected on relation "public.tab1_2_2": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(null, 4, quux\); replica identity \(a\)=\(4\)/,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab1_2_2": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(null, 4, quux\); replica identity \(a\)=\(4\)/,
'update target row is missing in tab1_2_2');
-ok( $logfile =~
- qr/conflict detected on relation "public.tab1_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab1_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/,
'delete target row is missing in tab1_1');
-ok( $logfile =~
- qr/conflict detected on relation "public.tab1_2_2": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(4\)/,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab1_2_2": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(4\)/,
'delete target row is missing in tab1_2_2');
-ok( $logfile =~
- qr/conflict detected on relation "public.tab1_def": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(10\)/,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab1_def": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(10\)/,
'delete target row is missing in tab1_def');
# Tests for replication using root table identity and schema
$node_publisher->wait_for_catchup('sub2');
$logfile = slurp_file($node_subscriber1->logfile(), $log_location);
-ok( $logfile =~
- qr/conflict detected on relation "public.tab2_1": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(pub_tab2, quux, 5\); replica identity \(a\)=\(5\)/,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab2_1": conflict=update_missing.*\n.*DETAIL:.* Could not find the row to be updated.*\n.*Remote row \(pub_tab2, quux, 5\); replica identity \(a\)=\(5\)/,
'update target row is missing in tab2_1');
-ok( $logfile =~
- qr/conflict detected on relation "public.tab2_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab2_1": conflict=delete_missing.*\n.*DETAIL:.* Could not find the row to be deleted.*\n.*Replica identity \(a\)=\(1\)/,
'delete target row is missing in tab2_1');
# Enable the track_commit_timestamp to detect the conflict when attempting
$node_publisher->wait_for_catchup('sub_viaroot');
$logfile = slurp_file($node_subscriber1->logfile(), $log_location);
-ok( $logfile =~
- qr/conflict detected on relation "public.tab2_1": conflict=update_origin_differs.*\n.*DETAIL:.* Updating the row that was modified locally in transaction [0-9]+ at .*\n.*Existing local row \(yyy, null, 3\); remote row \(pub_tab2, quux, 3\); replica identity \(a\)=\(3\)/,
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab2_1": conflict=update_origin_differs.*\n.*DETAIL:.* Updating the row that was modified locally in transaction [0-9]+ at .*\n.*Existing local row \(yyy, null, 3\); remote row \(pub_tab2, quux, 3\); replica identity \(a\)=\(3\)/,
'updating a row that was modified by a different origin');
# The remaining tests no longer test conflict detection.
isnt($ret, 0,
"non zero exit for subscription whose owner is a non-superuser must specify password parameter of the connection string"
);
- ok( $stderr =~
- m/DETAIL: Non-superusers must provide a password in the connection string./,
+ like(
+ $stderr,
+ qr/DETAIL: Non-superusers must provide a password in the connection string./,
'subscription whose owner is a non-superuser must specify password parameter of the connection string'
);
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_1, pub_mix_2;
));
-ok( $stderr =~
- qr/cannot use different column lists for table "public.test_mix_1" in different publications/,
+like(
+ $stderr,
+ qr/cannot use different column lists for table "public.test_mix_1" in different publications/,
'different column lists detected');
# TEST: If the column list is changed after creating the subscription, we
# Alter retain_dead_tuples for enabled subscription
my ($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
"ALTER SUBSCRIPTION $subname_AB SET (retain_dead_tuples = true)");
-ok( $stderr =~
- /ERROR: cannot set option \"retain_dead_tuples\" for enabled subscription/,
+like(
+ $stderr,
+ qr/ERROR: cannot set option \"retain_dead_tuples\" for enabled subscription/,
"altering retain_dead_tuples is not allowed for enabled subscription");
# Disable the subscription
# Enable retain_dead_tuples for disabled subscription
($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
"ALTER SUBSCRIPTION $subname_AB SET (retain_dead_tuples = true);");
-ok( $stderr =~
- /NOTICE: deleted rows to detect conflicts would not be removed until the subscription is enabled/,
+like(
+ $stderr,
+ qr/NOTICE: deleted rows to detect conflicts would not be removed until the subscription is enabled/,
"altering retain_dead_tuples is allowed for disabled subscription");
# Re-enable the subscription
($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
"ALTER SUBSCRIPTION $subname_AB SET (origin = any);");
-ok( $stderr =~
- /WARNING: subscription "tap_sub_a_b" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins/,
- "warn of the possibility of receiving changes from origins other than the publisher");
+like(
+ $stderr,
+ qr/WARNING: subscription "tap_sub_a_b" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins/,
+ "warn of the possibility of receiving changes from origins other than the publisher"
+);
# Reset the origin to none
$node_A->psql('postgres',
'postgres', qq(VACUUM (verbose) public.tab;)
);
-ok( $stderr =~
- qr/1 are dead but not yet removable/,
+like(
+ $stderr,
+ qr/1 are dead but not yet removable/,
'the deleted column is non-removable');
# Ensure the DELETE is replayed on Node B
# Check the conflict detected on Node B
my $logfile = slurp_file($node_B->logfile(), $log_location);
-ok( $logfile =~
- qr/conflict detected on relation "public.tab": conflict=delete_origin_differs.*
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab": conflict=delete_origin_differs.*
.*DETAIL:.* Deleting the row that was modified locally in transaction [0-9]+ at .*
.*Existing local row \(1, 3\); replica identity \(a\)=\(1\)/,
'delete target row was modified in tab');
$node_B->wait_for_catchup($subname_AB);
$logfile = slurp_file($node_A->logfile(), $log_location);
-ok( $logfile =~
- qr/conflict detected on relation "public.tab": conflict=update_deleted.*
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab": conflict=update_deleted.*
.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
.*Remote row \(1, 3\); replica identity \(a\)=\(1\)/,
'update target row was deleted in tab');
$node_B->wait_for_catchup($subname_AB);
$logfile = slurp_file($node_A->logfile(), $log_location);
-ok( $logfile =~
- qr/conflict detected on relation "public.tab": conflict=update_deleted.*
+like(
+ $logfile,
+ qr/conflict detected on relation "public.tab": conflict=update_deleted.*
.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
.*Remote row \(2, 4\); replica identity full \(2, 2\)/,
'update target row was deleted in tab');
($cmdret, $stdout, $stderr) =
$node_A->psql('postgres', qq(VACUUM (verbose) public.tab;));
- ok($stderr =~ qr/1 are dead but not yet removable/,
+ like(
+ $stderr,
+ qr/1 are dead but not yet removable/,
'the deleted column is non-removable');
$log_location = -s $node_A->logfile;
$node_B->wait_for_catchup($subname_AB);
$logfile = slurp_file($node_A->logfile(), $log_location);
- ok( $logfile =~
- qr/conflict detected on relation "public.tab": conflict=update_deleted.*
+ like(
+ $logfile,
+ qr/conflict detected on relation "public.tab": conflict=update_deleted.*
.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
.*Remote row \(1, 2\); replica identity full \(1, 1\)/,
'update target row was deleted in tab');