From d601bee765571ff346ed4c7c326c9cc9d6c9674e Mon Sep 17 00:00:00 2001
From: "d.lepikhova"
Date: Tue, 15 Mar 2022 16:12:40 +0500
Subject: [PATCH 01/13] Added dry-run option for catchup. Run catchup without affecting the files and WAL

---
 src/catchup.c | 113 ++++++++++++++++++++++++++++++--------------------
 src/ptrack.c  |   5 ++-
 2 files changed, 72 insertions(+), 46 deletions(-)

diff --git a/src/catchup.c b/src/catchup.c
index 1b8f8084d..294d9e3d5 100644
--- a/src/catchup.c
+++ b/src/catchup.c
@@ -166,14 +166,16 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn,
 	source_id = get_system_identifier(source_pgdata, FIO_DB_HOST, false); /* same as instance_config.system_identifier */

 	if (source_conn_id != source_id)
-		elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu",
+		elog(ERROR, "Database identifiers mismatch: we %s connected to DB id %lu, but in \"%s\" we found id %lu",
+			dry_run? "can":"will",
 			source_conn_id, source_pgdata, source_id);

 	if (current.backup_mode != BACKUP_MODE_FULL)
 	{
 		dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST, false);
 		if (source_conn_id != dest_id)
-			elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu",
+			elog(ERROR, "Database identifiers mismatch: we %s connected to DB id %lu, but in \"%s\" we found id %lu",
+				dry_run? "can":"will",
 				source_conn_id, dest_pgdata, dest_id);
 	}
 }
@@ -706,9 +708,12 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,

 	/* Start stream replication */
 	join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR);
-	fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
-	start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
-		current.start_lsn, current.tli, false);
+	if (!dry_run)
+	{
+		fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST);
+		start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt,
+			current.start_lsn, current.tli, false);
+	}

 	source_filelist = parray_new();
@@ -820,9 +825,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,
 			char		dirpath[MAXPGPATH];

 			join_path_components(dirpath, dest_pgdata, file->rel_path);
-
-			elog(VERBOSE, "Create directory '%s'", dirpath);
-			fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
+			elog(VERBOSE, "Directory '%s' %s be created", dirpath, dry_run? "can":"will");
+			if (!dry_run)
+				fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST);
 		}
 		else
 		{
@@ -850,18 +855,21 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads,

 			join_path_components(to_path, dest_pgdata, file->rel_path);

-			elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"",
-				linked_path, to_path);
+			elog(VERBOSE, "Directory \"%s\" and symbolic link \"%s\" %s be created",
+				linked_path, to_path, dry_run? 
"can":"will"); - /* create tablespace directory */ - if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) - elog(ERROR, "Could not create tablespace directory \"%s\": %s", - linked_path, strerror(errno)); - - /* create link to linked_path */ - if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) - elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", - linked_path, to_path, strerror(errno)); + if (!dry_run) + { + /* create tablespace directory */ + if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) + elog(ERROR, "Could not create tablespace directory \"%s\": %s", + linked_path, strerror(errno)); + + /* create link to linked_path */ + if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) + elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", + linked_path, to_path, strerror(errno)); + } } } @@ -901,7 +909,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, */ if (current.backup_mode != BACKUP_MODE_FULL) { - elog(INFO, "Removing redundant files in destination directory"); + elog(INFO, "Redundant files %s in destination directory", dry_run ? "can" : "will"); parray_qsort(dest_filelist, pgFileCompareRelPathWithExternalDesc); for (i = 0; i < parray_num(dest_filelist); i++) { @@ -930,11 +938,15 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); - fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + if (!dry_run) + { + fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); + elog(VERBOSE, "File \"%s\" %s deleted", fullpath, dry_run ? "can" : "will"); + } /* shrink dest pgdata list */ - pgFileFree(file); + if (!dry_run) + pgFileFree(file); parray_remove(dest_filelist, i); i--; } @@ -951,17 +963,20 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (dest_filelist) parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal); - /* run copy threads */ - elog(INFO, "Start transferring data files"); - time(&start_time); - transfered_datafiles_bytes = catchup_multithreaded_copy(num_threads, &source_node_info, - source_pgdata, dest_pgdata, - source_filelist, dest_filelist, - dest_redo.lsn, current.backup_mode); - catchup_isok = transfered_datafiles_bytes != -1; + if (!dry_run) + { + /* run copy threads */ + elog(INFO, "Start transferring data files"); + time(&start_time); + transfered_datafiles_bytes = catchup_multithreaded_copy(num_threads, &source_node_info, + source_pgdata, dest_pgdata, + source_filelist, dest_filelist, + dest_redo.lsn, current.backup_mode); + catchup_isok = transfered_datafiles_bytes != -1; + } /* at last copy control file */ - if (catchup_isok) + if (catchup_isok && !dry_run) { char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; @@ -972,7 +987,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, transfered_datafiles_bytes += source_pg_control_file->size; } - if (!catchup_isok) + if (!catchup_isok && !dry_run) { char pretty_time[20]; char pretty_transfered_data_bytes[20]; @@ -1010,15 +1025,19 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_free(stop_backup_query_text); } - wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); + if (!dry_run) + wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); #if PG_VERSION_NUM >= 90600 /* Write backup_label */ 
Assert(stop_backup_result.backup_label_content != NULL); - pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, - NULL); - free(stop_backup_result.backup_label_content); + if (!dry_run) + { + pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + NULL); + free(stop_backup_result.backup_label_content); + } stop_backup_result.backup_label_content = NULL; stop_backup_result.backup_label_content_len = 0; @@ -1040,6 +1059,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, #endif /* wait for end of wal streaming and calculate wal size transfered */ + if (!dry_run) { parray *wal_files_list = NULL; wal_files_list = parray_new(); @@ -1081,7 +1101,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pretty_size(transfered_datafiles_bytes, pretty_transfered_data_bytes, lengthof(pretty_transfered_data_bytes)); pretty_size(transfered_walfiles_bytes, pretty_transfered_wal_bytes, lengthof(pretty_transfered_wal_bytes)); - elog(INFO, "Databases synchronized. Transfered datafiles size: %s, transfered wal size: %s, time elapsed: %s", + elog(INFO, "Databases %s synchronized. Transfered datafiles size: %s, transfered wal size: %s, time elapsed: %s", + dry_run ? "can be" : "was", pretty_transfered_data_bytes, pretty_transfered_wal_bytes, pretty_time); if (current.backup_mode != BACKUP_MODE_FULL) @@ -1091,13 +1112,17 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* Sync all copied files unless '--no-sync' flag is used */ - if (sync_dest_files) - catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); - else - elog(WARNING, "Files are not synced to disk"); + if (!dry_run) + { + /* Sync all copied files unless '--no-sync' flag is used */ + if (sync_dest_files) + catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); + else + elog(WARNING, "Files are not synced to disk"); + } /* Cleanup */ - if (dest_filelist) + if (dest_filelist && !dry_run) { parray_walk(dest_filelist, pgFileFree); parray_free(dest_filelist); diff --git a/src/ptrack.c b/src/ptrack.c index ebcba1dd4..bde37ba1f 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -260,8 +260,9 @@ make_pagemap_from_ptrack_2(parray *files, page_map_entry *dummy_map = NULL; /* Receive all available ptrack bitmaps at once */ - filemaps = pg_ptrack_get_pagemapset(backup_conn, ptrack_schema, - ptrack_version_num, lsn); + if (!dry_run) + filemaps = pg_ptrack_get_pagemapset(backup_conn, ptrack_schema, + ptrack_version_num, lsn); if (filemaps != NULL) parray_qsort(filemaps, pgFileMapComparePath); From 00f781d40144aa5ffcadac58083b3082dcba096d Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Tue, 15 Mar 2022 16:14:56 +0500 Subject: [PATCH 02/13] Add tests for dry-run option in catchup for FULL and PTRACK mode --- tests/catchup.py | 98 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/tests/catchup.py b/tests/catchup.py index 8441deaaf..d971dd408 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1455,3 +1455,101 @@ def test_config_exclusion(self): dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) + + def test_dry_run_catchup_full(self): + """ + Test dry-run 
option for full catchup + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + + # save the condition before dry-run + dst_before = dst_pg.data_dir + + # do full catchup + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after cathup + self.compare_pgdata( + self.pgdata_content(dst_before), + self.pgdata_content(dst_pg.data_dir) + ) + + # compare data dirs before and after cathup +# self.compare_pgdata( +# self.pgdata_content(dst_before), +# self.pgdata_content(dst_pg.data_dir) +# ) + + # Cleanup + src_pg.stop() + + def test_dry_run_catchup_ptrack(self): + """ + Test dry-run option for catchup in incremental mode + """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # save the condition before dry-run + dst_before = dst_pg.data_dir + + # do incremental catchup + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after cathup + self.compare_pgdata( + self.pgdata_content(dst_before), + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + From 985cba33238d3f6f63b0e42d1a9bca1bc519e9cb Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Fri, 18 Mar 2022 14:02:04 +0500 Subject: [PATCH 03/13] Add dry-run option for catchup into help message --- .travis.yml | 2 +- src/help.c | 3 +++ tests/expected/option_help.out | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 876289e82..0a9e30fe9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,7 +36,7 @@ env: - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup + - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=compression # - PG_VERSION=13 
PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=delta # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=locking diff --git a/src/help.c b/src/help.c index a494ab209..ccffaea34 100644 --- a/src/help.c +++ b/src/help.c @@ -1047,6 +1047,7 @@ help_catchup(void) printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); + printf(_(" [--dry-run]\n")); printf(_(" [--help]\n\n")); printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n")); @@ -1081,4 +1082,6 @@ help_catchup(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); + + printf(_(" --dry-run perform a trial run without any changes\n\n")); } diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index a8b4a64b3..62c2da7cc 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -178,6 +178,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] Read the website for details. From cb46fd162e645c53609685814efb4bb7bf8b6cab Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Sun, 27 Mar 2022 20:45:33 +0500 Subject: [PATCH 04/13] Fix review notes. Correct compute size of transfered files with dry-run option --- src/catchup.c | 56 +++++++++++++++++++++++------------------------- src/ptrack.c | 5 ++--- tests/catchup.py | 14 ++++-------- 3 files changed, 33 insertions(+), 42 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index 294d9e3d5..6985a4dac 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -511,14 +511,18 @@ catchup_multithreaded_copy(int num_threads, threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); for (i = 0; i < num_threads; i++) { - elog(VERBOSE, "Start thread num: %i", i); - pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + if (!dry_run) + { + elog(VERBOSE, "Start thread num: %i", i); + pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + } } /* Wait threads */ for (i = 0; i < num_threads; i++) { - pthread_join(threads[i], NULL); + if (!dry_run) + pthread_join(threads[i], NULL); all_threads_successful &= threads_args[i].completed; transfered_bytes_result += threads_args[i].transfered_bytes; } @@ -714,6 +718,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, current.start_lsn, current.tli, false); } + else + elog(INFO, "WAL streaming cannot be started with --dry-run option"); source_filelist = parray_new(); @@ -784,9 +790,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Build the page map from ptrack information */ make_pagemap_from_ptrack_2(source_filelist, source_conn, - source_node_info.ptrack_schema, - source_node_info.ptrack_version_num, - dest_redo.lsn); + source_node_info.ptrack_schema, + source_node_info.ptrack_version_num, + dest_redo.lsn); time(&end_time); elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec", difftime(end_time, start_time)); @@ -909,7 +915,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, */ if 
(current.backup_mode != BACKUP_MODE_FULL) { - elog(INFO, "Redundant files %s in destination directory", dry_run ? "can" : "will"); + elog(INFO, "Redundant files in destination directory %s be removed", dry_run ? "can" : "will"); parray_qsort(dest_filelist, pgFileCompareRelPathWithExternalDesc); for (i = 0; i < parray_num(dest_filelist); i++) { @@ -945,8 +951,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* shrink dest pgdata list */ - if (!dry_run) - pgFileFree(file); + pgFileFree(file); parray_remove(dest_filelist, i); i--; } @@ -963,17 +968,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (dest_filelist) parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal); - if (!dry_run) - { - /* run copy threads */ - elog(INFO, "Start transferring data files"); - time(&start_time); - transfered_datafiles_bytes = catchup_multithreaded_copy(num_threads, &source_node_info, - source_pgdata, dest_pgdata, - source_filelist, dest_filelist, - dest_redo.lsn, current.backup_mode); - catchup_isok = transfered_datafiles_bytes != -1; - } + /* run copy threads */ + elog(INFO, "Transferring data files %s started", dry_run ? "can be" : ""); + time(&start_time); + transfered_datafiles_bytes = catchup_multithreaded_copy(num_threads, &source_node_info, + source_pgdata, dest_pgdata, + source_filelist, dest_filelist, + dest_redo.lsn, current.backup_mode); + catchup_isok = transfered_datafiles_bytes != -1; /* at last copy control file */ if (catchup_isok && !dry_run) @@ -1101,7 +1103,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pretty_size(transfered_datafiles_bytes, pretty_transfered_data_bytes, lengthof(pretty_transfered_data_bytes)); pretty_size(transfered_walfiles_bytes, pretty_transfered_wal_bytes, lengthof(pretty_transfered_wal_bytes)); - elog(INFO, "Databases %s synchronized. Transfered datafiles size: %s, transfered wal size: %s, time elapsed: %s", + elog(INFO, "Databases %s synchronized. Transfered datafiles sizes: %s, transfered wal size: %s, time elapsed: %s", dry_run ? 
"can be" : "was", pretty_transfered_data_bytes, pretty_transfered_wal_bytes, pretty_time); @@ -1112,14 +1114,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* Sync all copied files unless '--no-sync' flag is used */ - if (!dry_run) - { - /* Sync all copied files unless '--no-sync' flag is used */ - if (sync_dest_files) - catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); - else - elog(WARNING, "Files are not synced to disk"); - } + if (sync_dest_files && !dry_run) + catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, source_pg_control_file); + else + elog(WARNING, "Files are not synced to disk"); /* Cleanup */ if (dest_filelist && !dry_run) diff --git a/src/ptrack.c b/src/ptrack.c index bde37ba1f..ebcba1dd4 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -260,9 +260,8 @@ make_pagemap_from_ptrack_2(parray *files, page_map_entry *dummy_map = NULL; /* Receive all available ptrack bitmaps at once */ - if (!dry_run) - filemaps = pg_ptrack_get_pagemapset(backup_conn, ptrack_schema, - ptrack_version_num, lsn); + filemaps = pg_ptrack_get_pagemapset(backup_conn, ptrack_schema, + ptrack_version_num, lsn); if (filemaps != NULL) parray_qsort(filemaps, pgFileMapComparePath); diff --git a/tests/catchup.py b/tests/catchup.py index d971dd408..8a3df493c 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1475,7 +1475,7 @@ def test_dry_run_catchup_full(self): dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) # save the condition before dry-run - dst_before = dst_pg.data_dir + content_before = self.pgdata_content(dst_pg.data_dir) # do full catchup self.catchup_node( @@ -1487,16 +1487,10 @@ def test_dry_run_catchup_full(self): # compare data dirs before and after cathup self.compare_pgdata( - self.pgdata_content(dst_before), + content_before, self.pgdata_content(dst_pg.data_dir) ) - # compare data dirs before and after cathup -# self.compare_pgdata( -# self.pgdata_content(dst_before), -# self.pgdata_content(dst_pg.data_dir) -# ) - # Cleanup src_pg.stop() @@ -1534,7 +1528,7 @@ def test_dry_run_catchup_ptrack(self): dst_pg.stop() # save the condition before dry-run - dst_before = dst_pg.data_dir + content_before = self.pgdata_content(dst_pg.data_dir) # do incremental catchup self.catchup_node( @@ -1546,7 +1540,7 @@ def test_dry_run_catchup_ptrack(self): # compare data dirs before and after cathup self.compare_pgdata( - self.pgdata_content(dst_before), + content_before, self.pgdata_content(dst_pg.data_dir) ) From ac77a144789129f8e7dede44e2fe4c04900f18ae Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Tue, 12 Apr 2022 09:27:09 +0300 Subject: [PATCH 05/13] cosmetic change + try to skipping test_dry_run_catchup_ptrack in travis runs --- .travis.yml | 4 ++++ src/catchup.c | 2 +- tests/catchup.py | 5 ++++- travis/run_tests.sh | 9 +++++++++ 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0a9e30fe9..a557aea50 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,7 +36,11 @@ env: - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup + - PG_VERSION=13 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=catchup - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup + - PG_VERSION=13 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE MODE=catchup + - PG_VERSION=13 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE MODE=catchup + - PG_VERSION=13 PG_BRANCH=REL_10_STABLE PTRACK_PATCH_PG_BRANCH=REL_10_STABLE MODE=catchup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=compression # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=delta # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=locking diff --git a/src/catchup.c b/src/catchup.c index 6985a4dac..5084e012e 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -2,7 +2,7 @@ * * catchup.c: sync DB cluster * - * Copyright (c) 2021, Postgres Professional + * Copyright (c) 2022, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/tests/catchup.py b/tests/catchup.py index 8a3df493c..7ea59ab98 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1456,6 +1456,9 @@ def test_config_exclusion(self): #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) +######################################### +# --dry-run +######################################### def test_dry_run_catchup_full(self): """ Test dry-run option for full catchup @@ -1485,7 +1488,7 @@ def test_dry_run_catchup_full(self): options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] ) - # compare data dirs before and after cathup + # compare data dirs before and after catchup self.compare_pgdata( content_before, self.pgdata_content(dst_pg.data_dir) diff --git a/travis/run_tests.sh b/travis/run_tests.sh index 44815407e..4eec6f29a 100755 --- a/travis/run_tests.sh +++ b/travis/run_tests.sh @@ -100,11 +100,20 @@ source pyenv/bin/activate pip3 install testgres echo "############### Testing:" +echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA} +echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION} +echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD} +echo PGPROBACKUPBIN=${PGPROBACKUPBIN} +echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} +echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} +echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} if [ "$MODE" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests python3 -m unittest -v tests.init else + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} python3 -m unittest -v tests.$MODE fi From 4d6a3e9adcf7b06b08aaa40f8bb9a45dcebfe495 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Wed, 13 Apr 2022 12:11:59 +0500 Subject: [PATCH 06/13] Fix catchup tests for dry-run option --- tests/catchup.py | 78 
+++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 74 insertions(+), 4 deletions(-) diff --git a/tests/catchup.py b/tests/catchup.py index 7ea59ab98..9bac2f69d 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1470,13 +1470,18 @@ def test_dry_run_catchup_full(self): pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question(answer int)") # preparation 2: make clean shutdowned lagging behind replica dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") + # save the condition before dry-run content_before = self.pgdata_content(dst_pg.data_dir) @@ -1496,10 +1501,12 @@ def test_dry_run_catchup_full(self): # Cleanup src_pg.stop() + dst_pg.stop() + self.del_test_dir(module_name, self.fname) def test_dry_run_catchup_ptrack(self): """ - Test dry-run option for catchup in incremental mode + Test dry-run option for catchup in incremental ptrack mode """ if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') @@ -1514,6 +1521,10 @@ def test_dry_run_catchup_ptrack(self): src_pg.safe_psql( "postgres", "CREATE TABLE ultimate_question(answer int)") + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") # preparation 2: make clean shutdowned lagging behind replica dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) @@ -1549,4 +1560,63 @@ def test_dry_run_catchup_ptrack(self): # Cleanup src_pg.stop() + dst_pg.stop() + self.del_test_dir(module_name, self.fname) + + def test_dry_run_catchup_delta(self): + """ + Test dry-run option for catchup in incremental delta mode + """ + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(module_name, self.fname, 'src'), + set_replication = True, + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question(answer int)") + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do incremental catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after cathup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + dst_pg.stop() + 
self.del_test_dir(module_name, self.fname) From b4d565f7eaf899023c71dc2d3e10facf7a9db720 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Thu, 14 Apr 2022 19:19:28 +0500 Subject: [PATCH 07/13] Fix tests for dry-run option. Add test dry-run for DELTA backup --- .travis.yml | 2 +- tests/catchup.py | 36 +++++++++++------------------------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/.travis.yml b/.travis.yml index 85618a2b1..70135fe99 100644 --- a/.travis.yml +++ b/.travis.yml @@ -40,7 +40,7 @@ env: - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE MODE=catchup - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE MODE=catchup - - PG_VERSION=10 PG_BRANCH=REL_10_STABLE PTRACK_PATCH_PG_BRANCH=REL_10_STABLE MODE=catchup + - PG_VERSION=10 PG_BRANCH=REL_10_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=catchup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=compression # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking diff --git a/tests/catchup.py b/tests/catchup.py index 9bac2f69d..6afafb44b 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1466,21 +1466,16 @@ def test_dry_run_catchup_full(self): # preparation 1: source src_pg = self.make_simple_node( base_dir = os.path.join(module_name, self.fname, 'src'), - set_replication = True, - pg_options = { 'wal_log_hints': 'on' } + set_replication = True ) src_pg.slow_start() # preparation 2: make clean shutdowned lagging behind replica dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question(answer int)") src_pg.pgbench_init(scale = 10) pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() - src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") # save the condition before dry-run content_before = self.pgdata_content(dst_pg.data_dir) @@ -1501,30 +1496,25 @@ def test_dry_run_catchup_full(self): # Cleanup src_pg.stop() - dst_pg.stop() self.del_test_dir(module_name, self.fname) def test_dry_run_catchup_ptrack(self): """ Test dry-run option for catchup in incremental ptrack mode """ - if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') - - # preparation 1: source + # preparation 1: source src_pg = self.make_simple_node( base_dir = os.path.join(module_name, self.fname, 'src'), set_replication = True, - pg_options = { 'wal_log_hints': 'on' } + ptrack_enable = True, + initdb_params = ['--data-checksums'] ) src_pg.slow_start() - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question(answer int)") + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + src_pg.pgbench_init(scale = 10) pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() - src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") # preparation 2: make clean shutdowned lagging behind replica dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) @@ -1560,7 +1550,6 @@ def test_dry_run_catchup_ptrack(self): # Cleanup src_pg.stop() - dst_pg.stop() self.del_test_dir(module_name, self.fname) def test_dry_run_catchup_delta(self): @@ -1572,18 +1561,16 @@ def test_dry_run_catchup_delta(self): src_pg = self.make_simple_node( base_dir = os.path.join(module_name, self.fname, 
'src'), set_replication = True, + initdb_params = ['--data-checksums'], pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() - src_pg.safe_psql( - "postgres", - "CREATE TABLE ultimate_question(answer int)") + src_pg.pgbench_init(scale = 10) pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() - src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - # preparation 2: make clean shutdowned lagging behind replica + # preparation 2: make clean shutdowned lagging behind replica dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', @@ -1601,12 +1588,12 @@ def test_dry_run_catchup_delta(self): # save the condition before dry-run content_before = self.pgdata_content(dst_pg.data_dir) - # do incremental catchup + # do delta catchup self.catchup_node( backup_mode = 'DELTA', source_pgdata = src_pg.data_dir, destination_node = dst_pg, - options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', "--dry-run"] ) # compare data dirs before and after cathup @@ -1617,6 +1604,5 @@ def test_dry_run_catchup_delta(self): # Cleanup src_pg.stop() - dst_pg.stop() self.del_test_dir(module_name, self.fname) From c32f85450e52e3b0f684a792d0b249c21a38f158 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Fri, 15 Apr 2022 14:21:04 +0500 Subject: [PATCH 08/13] Add skipping test if ptrack not supperted --- tests/catchup.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/catchup.py b/tests/catchup.py index 6afafb44b..a83755c54 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1502,6 +1502,9 @@ def test_dry_run_catchup_ptrack(self): """ Test dry-run option for catchup in incremental ptrack mode """ + if not self.ptrack: + return unittest.skip('Skipped because ptrack support is disabled') + # preparation 1: source src_pg = self.make_simple_node( base_dir = os.path.join(module_name, self.fname, 'src'), From 448efc222232d993f5ed50307f67cb5fad37d1f1 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Fri, 15 Apr 2022 15:50:09 +0500 Subject: [PATCH 09/13] Revert the messages text to the original form --- src/catchup.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index 5084e012e..ddaa3afae 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -166,16 +166,14 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, source_id = get_system_identifier(source_pgdata, FIO_DB_HOST, false); /* same as instance_config.system_identifier */ if (source_conn_id != source_id) - elog(ERROR, "Database identifiers mismatch: we %s connected to DB id %lu, but in \"%s\" we found id %lu", - dry_run? "can":"will", + elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", source_conn_id, source_pgdata, source_id); if (current.backup_mode != BACKUP_MODE_FULL) { dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST, false); if (source_conn_id != dest_id) - elog(ERROR, "Database identifiers mismatch: we %s connected to DB id %lu, but in \"%s\" we found id %lu", - dry_run? 
"can":"will", + elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", source_conn_id, dest_pgdata, dest_id); } } @@ -831,7 +829,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char dirpath[MAXPGPATH]; join_path_components(dirpath, dest_pgdata, file->rel_path); - elog(VERBOSE, "Directory '%s' %s be created", dirpath, dry_run? "can":"will"); + elog(VERBOSE, "Create directory '%s'", dirpath); if (!dry_run) fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); } @@ -861,8 +859,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(to_path, dest_pgdata, file->rel_path); - elog(VERBOSE, "Directory \"%s\" and symbolic link \"%s\" %s be created", - linked_path, to_path, dry_run? "can":"will"); + elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + linked_path, to_path); if (!dry_run) { @@ -915,7 +913,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, */ if (current.backup_mode != BACKUP_MODE_FULL) { - elog(INFO, "Redundant files in destination directory %s be removed", dry_run ? "can" : "will"); + elog(INFO, "Removing redundant files in destination directory"); parray_qsort(dest_filelist, pgFileCompareRelPathWithExternalDesc); for (i = 0; i < parray_num(dest_filelist); i++) { @@ -947,7 +945,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (!dry_run) { fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); - elog(VERBOSE, "File \"%s\" %s deleted", fullpath, dry_run ? "can" : "will"); + elog(VERBOSE, "Deleted file \"%s\"", fullpath); } /* shrink dest pgdata list */ @@ -969,7 +967,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal); /* run copy threads */ - elog(INFO, "Transferring data files %s started", dry_run ? "can be" : ""); + elog(INFO, "Start transferring data files"); time(&start_time); transfered_datafiles_bytes = catchup_multithreaded_copy(num_threads, &source_node_info, source_pgdata, dest_pgdata, @@ -1103,8 +1101,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pretty_size(transfered_datafiles_bytes, pretty_transfered_data_bytes, lengthof(pretty_transfered_data_bytes)); pretty_size(transfered_walfiles_bytes, pretty_transfered_wal_bytes, lengthof(pretty_transfered_wal_bytes)); - elog(INFO, "Databases %s synchronized. Transfered datafiles sizes: %s, transfered wal size: %s, time elapsed: %s", - dry_run ? "can be" : "was", + elog(INFO, "Databases synchronized. 
Transfered datafiles size: %s, transfered wal size: %s, time elapsed: %s", pretty_transfered_data_bytes, pretty_transfered_wal_bytes, pretty_time); if (current.backup_mode != BACKUP_MODE_FULL) From 63760d4c63bddd5b43cfe5cf69c3ebbf77c0e7fd Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Thu, 5 May 2022 15:48:21 +0500 Subject: [PATCH 10/13] Fix review notes --- src/catchup.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/catchup.c b/src/catchup.c index ddaa3afae..bbf78419a 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -507,9 +507,9 @@ catchup_multithreaded_copy(int num_threads, /* Run threads */ thread_interrupted = false; threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); - for (i = 0; i < num_threads; i++) + if (!dry_run) { - if (!dry_run) + for (i = 0; i < num_threads; i++) { elog(VERBOSE, "Start thread num: %i", i); pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); @@ -788,9 +788,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Build the page map from ptrack information */ make_pagemap_from_ptrack_2(source_filelist, source_conn, - source_node_info.ptrack_schema, - source_node_info.ptrack_version_num, - dest_redo.lsn); + source_node_info.ptrack_schema, + source_node_info.ptrack_version_num, + dest_redo.lsn); time(&end_time); elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec", difftime(end_time, start_time)); @@ -945,8 +945,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (!dry_run) { fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); } + elog(VERBOSE, "Deleted file \"%s\"", fullpath); /* shrink dest pgdata list */ pgFileFree(file); @@ -1036,8 +1036,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, NULL); - free(stop_backup_result.backup_label_content); } + free(stop_backup_result.backup_label_content); stop_backup_result.backup_label_content = NULL; stop_backup_result.backup_label_content_len = 0; @@ -1120,8 +1120,8 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (dest_filelist && !dry_run) { parray_walk(dest_filelist, pgFileFree); - parray_free(dest_filelist); } + parray_free(dest_filelist); parray_walk(source_filelist, pgFileFree); parray_free(source_filelist); pgFileFree(source_pg_control_file); From 417992f174d83cb0840dad74be107bd49c850d75 Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Thu, 19 May 2022 13:39:44 +0500 Subject: [PATCH 11/13] Fix log message about WAL streaming with dry-run option --- src/catchup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/catchup.c b/src/catchup.c index bbf78419a..3c522afb7 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -717,7 +717,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, current.start_lsn, current.tli, false); } else - elog(INFO, "WAL streaming cannot be started with --dry-run option"); + elog(INFO, "WAL streaming skipping with --dry-run option"); source_filelist = parray_new(); From 76736ef1ea2b622aa5bb34219049c773f329ae4f Mon Sep 17 00:00:00 2001 From: "d.lepikhova" Date: Sun, 29 May 2022 14:59:44 +0500 Subject: [PATCH 12/13] Add dry-run into catchup option list for help command --- src/help.c | 1 + 1 
file changed, 1 insertion(+)

diff --git a/src/help.c b/src/help.c
index ccffaea34..3385c72a1 100644
--- a/src/help.c
+++ b/src/help.c
@@ -261,6 +261,7 @@ help_pg_probackup(void)
 	printf(_("                 [--remote-proto] [--remote-host]\n"));
 	printf(_("                 [--remote-port] [--remote-path] [--remote-user]\n"));
 	printf(_("                 [--ssh-options]\n"));
+	printf(_("                 [--dry-run]\n"));
 	printf(_("                 [--help]\n"));

 	if ((PROGRAM_URL || PROGRAM_EMAIL))

From f38feeb952d3dcdadedba4566a2d18b1d5fabc36 Mon Sep 17 00:00:00 2001
From: "Mikhail A. Kulagin"
Date: Wed, 1 Jun 2022 10:44:46 +0300
Subject: [PATCH 13/13] [ci skip] revert .travis.yml

---
 .travis.yml | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 70135fe99..663330918 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -36,11 +36,7 @@ env:
   - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup
-  - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE MODE=catchup
-  - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup
-  - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE MODE=catchup
-  - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE MODE=catchup
-  - PG_VERSION=10 PG_BRANCH=REL_10_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=catchup
+# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=compression
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta
 # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking
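
Taken together, the series lets a catchup run be rehearsed end to end: under --dry-run the destination directories, tablespace symlinks, file copies, WAL streaming and removal of redundant files are all skipped, while the preflight connection checks and file-list comparison still run. A minimal sketch of such an invocation (the --source-pgdata/--destination-pgdata flag names are assumed from pg_probackup's catchup interface rather than shown in the hunks above; paths and port are purely illustrative):

    # Rehearse an incremental catchup: report what would be copied or removed,
    # but leave the destination data directory and its WAL untouched.
    pg_probackup catchup -b DELTA \
        --source-pgdata=/var/lib/pgsql/13/data \
        --destination-pgdata=/var/lib/pgsql/13/replica \
        -d postgres -p 5432 --stream \
        --dry-run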