diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml
index 2bf197814..b1ddd0032 100644
--- a/doc/pgprobackup.xml
+++ b/doc/pgprobackup.xml
@@ -1162,7 +1162,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup;
- PTRACK versions lower than 2.0 are deprecated. Postgres Pro Standard and Postgres Pro Enterprise
+ PTRACK versions lower than 2.0 are deprecated and not supported. Postgres Pro Standard and Postgres Pro Enterprise
versions starting with 11.9.1 contain PTRACK 2.0. Upgrade your server to avoid issues in backups
that you will take in future and be sure to take fresh backups of your clusters with the upgraded
PTRACK since the backups taken with PTRACK 1.x might be corrupt.
@@ -1218,34 +1218,6 @@ CREATE EXTENSION ptrack;
-
- For older PostgreSQL versions,
- PTRACK required taking backups in the exclusive mode
- to provide exclusive access to bitmaps with changed blocks.
- To set up PTRACK backups for PostgreSQL 10
- or lower, do the following:
-
-
-
-
- Set the ptrack_enable parameter to
- on.
-
-
-
-
- Grant the right to execute PTRACK
- functions to the backup role
- in every database of the
- cluster:
-
-
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_clear() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_ptrack_get_and_clear(oid, oid) TO backup;
-
-
-
-
diff --git a/src/backup.c b/src/backup.c
index 46e4f1ea7..738b6dcf2 100644
--- a/src/backup.c
+++ b/src/backup.c
@@ -125,10 +125,6 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
check_external_for_tablespaces(external_dirs, backup_conn);
}
- /* Clear ptrack files for not PTRACK backups */
- if (current.backup_mode != BACKUP_MODE_DIFF_PTRACK && nodeInfo->is_ptrack_enable)
- pg_ptrack_clear(backup_conn, nodeInfo->ptrack_version_num);
-
/* notify start of backup to PostgreSQL server */
time2iso(label, lengthof(label), current.start_time, false);
strncat(label, " with pg_probackup", lengthof(label) -
@@ -217,29 +213,14 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
{
XLogRecPtr ptrack_lsn = get_last_ptrack_lsn(backup_conn, nodeInfo);
- if (nodeInfo->ptrack_version_num < 200)
+ // new ptrack (>=2.0) is more robust and checks Start LSN
+ if (ptrack_lsn > prev_backup->start_lsn || ptrack_lsn == InvalidXLogRecPtr)
{
- // backward compatibility kludge: use Stop LSN for ptrack 1.x,
- if (ptrack_lsn > prev_backup->stop_lsn || ptrack_lsn == InvalidXLogRecPtr)
- {
- elog(ERROR, "LSN from ptrack_control %X/%X differs from Stop LSN of previous backup %X/%X.\n"
- "Create new full backup before an incremental one.",
- (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn),
- (uint32) (prev_backup->stop_lsn >> 32),
- (uint32) (prev_backup->stop_lsn));
- }
- }
- else
- {
- // new ptrack is more robust and checks Start LSN
- if (ptrack_lsn > prev_backup->start_lsn || ptrack_lsn == InvalidXLogRecPtr)
- {
- elog(ERROR, "LSN from ptrack_control %X/%X is greater than Start LSN of previous backup %X/%X.\n"
- "Create new full backup before an incremental one.",
- (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn),
- (uint32) (prev_backup->start_lsn >> 32),
- (uint32) (prev_backup->start_lsn));
- }
+ elog(ERROR, "LSN from ptrack_control %X/%X is greater than Start LSN of previous backup %X/%X.\n"
+ "Create new full backup before an incremental one.",
+ (uint32) (ptrack_lsn >> 32), (uint32) (ptrack_lsn),
+ (uint32) (prev_backup->start_lsn >> 32),
+ (uint32) (prev_backup->start_lsn));
}
}
@@ -407,15 +388,10 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
/*
* Build the page map from ptrack information.
*/
- if (nodeInfo->ptrack_version_num >= 200)
- make_pagemap_from_ptrack_2(backup_files_list, backup_conn,
- nodeInfo->ptrack_schema,
- nodeInfo->ptrack_version_num,
- prev_backup_start_lsn);
- else if (nodeInfo->ptrack_version_num == 105 ||
- nodeInfo->ptrack_version_num == 106 ||
- nodeInfo->ptrack_version_num == 107)
- make_pagemap_from_ptrack_1(backup_files_list, backup_conn);
+ make_pagemap_from_ptrack_2(backup_files_list, backup_conn,
+ nodeInfo->ptrack_schema,
+ nodeInfo->ptrack_version_num,
+ prev_backup_start_lsn);
}
time(&end_time);
@@ -490,8 +466,6 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
arg->files_list = backup_files_list;
arg->prev_filelist = prev_backup_filelist;
arg->prev_start_lsn = prev_backup_start_lsn;
- arg->conn_arg.conn = NULL;
- arg->conn_arg.cancel_conn = NULL;
arg->hdr_map = &(current.hdr_map);
arg->thread_num = i+1;
/* By default there are some error */
@@ -816,6 +790,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params,
if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
{
+ /* ptrack_version_num < 2.0 was already checked in get_ptrack_version() */
if (nodeInfo.ptrack_version_num == 0)
elog(ERROR, "This PostgreSQL instance does not support ptrack");
else
@@ -2085,15 +2060,15 @@ backup_files(void *arg)
/* backup file */
if (file->is_datafile && !file->is_cfs)
{
- backup_data_file(&(arguments->conn_arg), file, from_fullpath, to_fullpath,
- arguments->prev_start_lsn,
- current.backup_mode,
- instance_config.compress_alg,
- instance_config.compress_level,
- arguments->nodeInfo->checksum_version,
- arguments->nodeInfo->ptrack_version_num,
- arguments->nodeInfo->ptrack_schema,
- arguments->hdr_map, false);
+ backup_data_file(file, from_fullpath, to_fullpath,
+ arguments->prev_start_lsn,
+ current.backup_mode,
+ instance_config.compress_alg,
+ instance_config.compress_level,
+ arguments->nodeInfo->checksum_version,
+ arguments->nodeInfo->ptrack_version_num,
+ arguments->nodeInfo->ptrack_schema,
+ arguments->hdr_map, false);
}
else
{
@@ -2117,10 +2092,6 @@ backup_files(void *arg)
/* ssh connection to longer needed */
fio_disconnect();
- /* Close connection */
- if (arguments->conn_arg.conn)
- pgut_disconnect(arguments->conn_arg.conn);
-
/* Data files transferring is successful */
arguments->ret = 0;
diff --git a/src/data.c b/src/data.c
index 9d8bfc584..314490585 100644
--- a/src/data.c
+++ b/src/data.c
@@ -276,8 +276,7 @@ get_checksum_errormsg(Page page, char **errormsg, BlockNumber absolute_blkno)
* return it to the caller
*/
static int32
-prepare_page(ConnectionArgs *conn_arg,
- pgFile *file, XLogRecPtr prev_backup_start_lsn,
+prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
BlockNumber blknum, FILE *in,
BackupMode backup_mode,
Page page, bool strict,
@@ -290,6 +289,7 @@ prepare_page(ConnectionArgs *conn_arg,
int try_again = PAGE_READ_ATTEMPTS;
bool page_is_valid = false;
BlockNumber absolute_blknum = file->segno * RELSEG_SIZE + blknum;
+ int rc = 0;
/* check for interrupt */
if (interrupted || thread_interrupted)
@@ -300,161 +300,97 @@ prepare_page(ConnectionArgs *conn_arg,
* Under high write load it's possible that we've read partly
* flushed page, so try several times before throwing an error.
*/
- if (backup_mode != BACKUP_MODE_DIFF_PTRACK || ptrack_version_num >= 200)
+ while (!page_is_valid && try_again--)
{
- int rc = 0;
- while (!page_is_valid && try_again--)
- {
- /* read the block */
- int read_len = fio_pread(in, page, blknum * BLCKSZ);
+ /* read the block */
+ int read_len = fio_pread(in, page, blknum * BLCKSZ);
- /* The block could have been truncated. It is fine. */
- if (read_len == 0)
- {
- elog(VERBOSE, "Cannot read block %u of \"%s\": "
- "block truncated", blknum, from_fullpath);
- return PageIsTruncated;
- }
- else if (read_len < 0)
- elog(ERROR, "Cannot read block %u of \"%s\": %s",
- blknum, from_fullpath, strerror(errno));
- else if (read_len != BLCKSZ)
- elog(WARNING, "Cannot read block %u of \"%s\": "
- "read %i of %d, try again",
- blknum, from_fullpath, read_len, BLCKSZ);
- else
+ /* The block could have been truncated. It is fine. */
+ if (read_len == 0)
+ {
+ elog(VERBOSE, "Cannot read block %u of \"%s\": "
+ "block truncated", blknum, from_fullpath);
+ return PageIsTruncated;
+ }
+ else if (read_len < 0)
+ elog(ERROR, "Cannot read block %u of \"%s\": %s",
+ blknum, from_fullpath, strerror(errno));
+ else if (read_len != BLCKSZ)
+ elog(WARNING, "Cannot read block %u of \"%s\": "
+ "read %i of %d, try again",
+ blknum, from_fullpath, read_len, BLCKSZ);
+ else
+ {
+ /* We have BLCKSZ of raw data, validate it */
+ rc = validate_one_page(page, absolute_blknum,
+ InvalidXLogRecPtr, page_st,
+ checksum_version);
+ switch (rc)
{
- /* We have BLCKSZ of raw data, validate it */
- rc = validate_one_page(page, absolute_blknum,
- InvalidXLogRecPtr, page_st,
- checksum_version);
- switch (rc)
- {
- case PAGE_IS_ZEROED:
- elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum);
+ case PAGE_IS_ZEROED:
+ elog(VERBOSE, "File: \"%s\" blknum %u, empty page", from_fullpath, blknum);
+ return PageIsOk;
+
+ case PAGE_IS_VALID:
+ /* in DELTA or PTRACK modes we must compare lsn */
+ if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK)
+ page_is_valid = true;
+ else
return PageIsOk;
-
- case PAGE_IS_VALID:
- /* in DELTA or PTRACK modes we must compare lsn */
- if (backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK)
- page_is_valid = true;
- else
- return PageIsOk;
- break;
-
- case PAGE_HEADER_IS_INVALID:
- elog(VERBOSE, "File: \"%s\" blknum %u have wrong page header, try again",
- from_fullpath, blknum);
- break;
-
- case PAGE_CHECKSUM_MISMATCH:
- elog(VERBOSE, "File: \"%s\" blknum %u have wrong checksum, try again",
- from_fullpath, blknum);
- break;
- default:
- Assert(false);
- }
+ break;
+
+ case PAGE_HEADER_IS_INVALID:
+ elog(VERBOSE, "File: \"%s\" blknum %u have wrong page header, try again",
+ from_fullpath, blknum);
+ break;
+
+ case PAGE_CHECKSUM_MISMATCH:
+ elog(VERBOSE, "File: \"%s\" blknum %u have wrong checksum, try again",
+ from_fullpath, blknum);
+ break;
+ default:
+ Assert(false);
}
}
-
- /*
- * If page is not valid after 100 attempts to read it
- * throw an error.
- */
- if (!page_is_valid)
- {
- int elevel = ERROR;
- char *errormsg = NULL;
-
- /* Get the details of corruption */
- if (rc == PAGE_HEADER_IS_INVALID)
- get_header_errormsg(page, &errormsg);
- else if (rc == PAGE_CHECKSUM_MISMATCH)
- get_checksum_errormsg(page, &errormsg,
- file->segno * RELSEG_SIZE + blknum);
-
- /* Error out in case of merge or backup without ptrack support;
- * issue warning in case of checkdb or backup with ptrack support
- */
- if (!strict)
- elevel = WARNING;
-
- if (errormsg)
- elog(elevel, "Corruption detected in file \"%s\", block %u: %s",
- from_fullpath, blknum, errormsg);
- else
- elog(elevel, "Corruption detected in file \"%s\", block %u",
- from_fullpath, blknum);
-
- pg_free(errormsg);
- return PageIsCorrupted;
- }
-
- /* Checkdb not going futher */
- if (!strict)
- return PageIsOk;
}
/*
- * Get page via ptrack interface from PostgreSQL shared buffer.
- * We do this only in the cases of PTRACK 1.x versions backup
+ * If page is not valid after PAGE_READ_ATTEMPTS attempts to read it
+ * throw an error.
*/
- if (backup_mode == BACKUP_MODE_DIFF_PTRACK
- && (ptrack_version_num >= 105 && ptrack_version_num < 200))
+ if (!page_is_valid)
{
- int rc = 0;
- size_t page_size = 0;
- Page ptrack_page = NULL;
- ptrack_page = (Page) pg_ptrack_get_block(conn_arg, file->dbOid, file->tblspcOid,
- file->relOid, absolute_blknum, &page_size,
- ptrack_version_num, ptrack_schema);
-
- if (ptrack_page == NULL)
- /* This block was truncated.*/
- return PageIsTruncated;
-
- if (page_size != BLCKSZ)
- elog(ERROR, "File: \"%s\", block %u, expected block size %d, but read %zu",
- from_fullpath, blknum, BLCKSZ, page_size);
-
- /*
- * We need to copy the page that was successfully
- * retrieved from ptrack into our output "page" parameter.
- */
- memcpy(page, ptrack_page, BLCKSZ);
- pg_free(ptrack_page);
-
- /*
- * UPD: It apprears that is possible to get zeroed page or page with invalid header
- * from shared buffer.
- * Note, that getting page with wrong checksumm from shared buffer is
- * acceptable.
- */
- rc = validate_one_page(page, absolute_blknum,
- InvalidXLogRecPtr, page_st,
- checksum_version);
-
- /* It is ok to get zeroed page */
- if (rc == PAGE_IS_ZEROED)
- return PageIsOk;
+ int elevel = ERROR;
+ char *errormsg = NULL;
- /* Getting page with invalid header from shared buffers is unacceptable */
+ /* Get the details of corruption */
if (rc == PAGE_HEADER_IS_INVALID)
- {
- char *errormsg = NULL;
get_header_errormsg(page, &errormsg);
- elog(ERROR, "Corruption detected in file \"%s\", block %u: %s",
- from_fullpath, blknum, errormsg);
- }
+ else if (rc == PAGE_CHECKSUM_MISMATCH)
+ get_checksum_errormsg(page, &errormsg,
+ file->segno * RELSEG_SIZE + blknum);
- /*
- * We must set checksum here, because it is outdated
- * in the block recieved from shared buffers.
+	 /* Error out in case of merge or backup;
+	  * issue a warning in case of checkdb (non-strict mode)
*/
- if (checksum_version)
- page_st->checksum = ((PageHeader) page)->pd_checksum = pg_checksum_page(page, absolute_blknum);
+ if (!strict)
+ elevel = WARNING;
+
+ if (errormsg)
+ elog(elevel, "Corruption detected in file \"%s\", block %u: %s",
+ from_fullpath, blknum, errormsg);
+ else
+ elog(elevel, "Corruption detected in file \"%s\", block %u",
+ from_fullpath, blknum);
+
+ pg_free(errormsg);
+ return PageIsCorrupted;
}
+	/* Checkdb is not going further */
+ if (!strict)
+ return PageIsOk;
+
/*
* Skip page if page lsn is less than START_LSN of parent backup.
* Nullified pages must be copied by DELTA backup, just to be safe.
@@ -531,8 +467,7 @@ compress_and_backup_page(pgFile *file, BlockNumber blknum,
* backup with special header.
*/
void
-backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
- const char *from_fullpath, const char *to_fullpath,
+backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
CompressAlg calg, int clevel, uint32 checksum_version,
int ptrack_version_num, const char *ptrack_schema,
@@ -614,7 +549,7 @@ backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
else
{
/* TODO: stop handling errors internally */
- rc = send_pages(conn_arg, to_fullpath, from_fullpath, file,
+ rc = send_pages(to_fullpath, from_fullpath, file,
/* send prev backup START_LSN */
(backup_mode == BACKUP_MODE_DIFF_DELTA || backup_mode == BACKUP_MODE_DIFF_PTRACK) &&
file->exists_in_prev ? prev_backup_start_lsn : InvalidXLogRecPtr,
@@ -1563,10 +1498,10 @@ check_data_file(ConnectionArgs *arguments, pgFile *file,
for (blknum = 0; blknum < nblocks; blknum++)
{
PageState page_st;
- page_state = prepare_page(NULL, file, InvalidXLogRecPtr,
- blknum, in, BACKUP_MODE_FULL,
- curr_page, false, checksum_version,
- 0, NULL, from_fullpath, &page_st);
+ page_state = prepare_page(file, InvalidXLogRecPtr,
+ blknum, in, BACKUP_MODE_FULL,
+ curr_page, false, checksum_version,
+ 0, NULL, from_fullpath, &page_st);
if (page_state == PageIsTruncated)
break;
@@ -1994,7 +1929,7 @@ open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size)
/* backup local file */
int
-send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_fullpath,
+send_pages(const char *to_fullpath, const char *from_fullpath,
pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel,
uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers,
BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema)
@@ -2052,11 +1987,11 @@ send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_f
while (blknum < file->n_blocks)
{
PageState page_st;
- int rc = prepare_page(conn_arg, file, prev_backup_start_lsn,
- blknum, in, backup_mode, curr_page,
- true, checksum_version,
- ptrack_version_num, ptrack_schema,
- from_fullpath, &page_st);
+ int rc = prepare_page(file, prev_backup_start_lsn,
+ blknum, in, backup_mode, curr_page,
+ true, checksum_version,
+ ptrack_version_num, ptrack_schema,
+ from_fullpath, &page_st);
if (rc == PageIsTruncated)
break;
diff --git a/src/dir.c b/src/dir.c
index dfcddd7d0..ce255d0ad 100644
--- a/src/dir.c
+++ b/src/dir.c
@@ -677,26 +677,16 @@ dir_check_file(pgFile *file, bool backup_logs)
*/
if (sscanf_res == 2 && strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) != 0)
return CHECK_FALSE;
-
- if (sscanf_res == 3 && S_ISDIR(file->mode) &&
- strcmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY) == 0)
- file->is_database = true;
}
else if (path_is_prefix_of_path("global", file->rel_path))
{
file->tblspcOid = GLOBALTABLESPACE_OID;
-
- if (S_ISDIR(file->mode) && strcmp(file->name, "global") == 0)
- file->is_database = true;
}
else if (path_is_prefix_of_path("base", file->rel_path))
{
file->tblspcOid = DEFAULTTABLESPACE_OID;
sscanf(file->rel_path, "base/%u/", &(file->dbOid));
-
- if (S_ISDIR(file->mode) && strcmp(file->name, "base") != 0)
- file->is_database = true;
}
/* Do not backup ptrack_init files */
diff --git a/src/merge.c b/src/merge.c
index 6e0e74940..e59b359fe 100644
--- a/src/merge.c
+++ b/src/merge.c
@@ -1253,7 +1253,7 @@ merge_data_file(parray *parent_chain, pgBackup *full_backup,
* 2 backups of old versions, where n_blocks is missing.
*/
- backup_data_file(NULL, tmp_file, to_fullpath_tmp1, to_fullpath_tmp2,
+ backup_data_file(tmp_file, to_fullpath_tmp1, to_fullpath_tmp2,
InvalidXLogRecPtr, BACKUP_MODE_FULL,
dest_backup->compress_alg, dest_backup->compress_level,
dest_backup->checksum_version, 0, NULL,
diff --git a/src/pg_probackup.h b/src/pg_probackup.h
index f5fd0f672..ccbf803fd 100644
--- a/src/pg_probackup.h
+++ b/src/pg_probackup.h
@@ -264,7 +264,6 @@ typedef struct pgFile
int segno; /* Segment number for ptrack */
int n_blocks; /* number of blocks in the data file in data directory */
bool is_cfs; /* Flag to distinguish files compressed by CFS*/
- bool is_database; /* Flag used strictly by ptrack 1.x backup */
int external_dir_num; /* Number of external directory. 0 if not external */
bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */
CompressAlg compress_alg; /* compression algorithm applied to the file */
@@ -589,7 +588,6 @@ typedef struct
parray *external_dirs;
XLogRecPtr prev_start_lsn;
- ConnectionArgs conn_arg;
int thread_num;
HeaderMap *hdr_map;
@@ -842,10 +840,6 @@ extern const char *deparse_backup_mode(BackupMode mode);
extern void process_block_change(ForkNumber forknum, RelFileNode rnode,
BlockNumber blkno);
-extern char *pg_ptrack_get_block(ConnectionArgs *arguments,
- Oid dbOid, Oid tblsOid, Oid relOid,
- BlockNumber blknum, size_t *result_size,
- int ptrack_version_num, const char *ptrack_schema);
/* in restore.c */
extern int do_restore_or_validate(InstanceState *instanceState,
time_t target_backup_id,
@@ -1067,12 +1061,11 @@ extern void pfilearray_clear_locks(parray *file_list);
extern bool check_data_file(ConnectionArgs *arguments, pgFile *file,
const char *from_fullpath, uint32 checksum_version);
-extern void backup_data_file(ConnectionArgs* conn_arg, pgFile *file,
- const char *from_fullpath, const char *to_fullpath,
- XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
- CompressAlg calg, int clevel, uint32 checksum_version,
- int ptrack_version_num, const char *ptrack_schema,
- HeaderMap *hdr_map, bool missing_ok);
+extern void backup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpath,
+ XLogRecPtr prev_backup_start_lsn, BackupMode backup_mode,
+ CompressAlg calg, int clevel, uint32 checksum_version,
+ int ptrack_version_num, const char *ptrack_schema,
+ HeaderMap *hdr_map, bool missing_ok);
extern void backup_non_data_file(pgFile *file, pgFile *prev_file,
const char *from_fullpath, const char *to_fullpath,
BackupMode backup_mode, time_t parent_backup_time,
@@ -1172,20 +1165,12 @@ extern void check_system_identifiers(PGconn *conn, char *pgdata);
extern void parse_filelist_filenames(parray *files, const char *root);
/* in ptrack.c */
-extern void make_pagemap_from_ptrack_1(parray* files, PGconn* backup_conn);
extern void make_pagemap_from_ptrack_2(parray* files, PGconn* backup_conn,
const char *ptrack_schema,
int ptrack_version_num,
XLogRecPtr lsn);
-extern void pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num);
extern void get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo);
extern bool pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num);
-extern bool pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn);
-extern char *pg_ptrack_get_and_clear(Oid tablespace_oid,
- Oid db_oid,
- Oid rel_oid,
- size_t *result_size,
- PGconn *backup_conn);
extern XLogRecPtr get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo);
extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema,
int ptrack_version_num, XLogRecPtr lsn);
@@ -1193,7 +1178,7 @@ extern parray * pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack
/* open local file to writing */
extern FILE* open_local_file_rw(const char *to_fullpath, char **out_buf, uint32 buf_size);
-extern int send_pages(ConnectionArgs* conn_arg, const char *to_fullpath, const char *from_fullpath,
+extern int send_pages(const char *to_fullpath, const char *from_fullpath,
pgFile *file, XLogRecPtr prev_backup_start_lsn, CompressAlg calg, int clevel,
uint32 checksum_version, bool use_pagemap, BackupPageHeader2 **headers,
BackupMode backup_mode, int ptrack_version_num, const char *ptrack_schema);
diff --git a/src/ptrack.c b/src/ptrack.c
index 5a2b9f046..6825686c6 100644
--- a/src/ptrack.c
+++ b/src/ptrack.c
@@ -2,7 +2,7 @@
*
* ptrack.c: support functions for ptrack backups
*
- * Copyright (c) 2019 Postgres Professional
+ * Copyright (c) 2021 Postgres Professional
*
*-------------------------------------------------------------------------
*/
@@ -21,124 +21,6 @@
#define PTRACK_BITS_PER_HEAPBLOCK 1
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / PTRACK_BITS_PER_HEAPBLOCK)
-/*
- * Given a list of files in the instance to backup, build a pagemap for each
- * data file that has ptrack. Result is saved in the pagemap field of pgFile.
- * NOTE we rely on the fact that provided parray is sorted by file->rel_path.
- */
-void
-make_pagemap_from_ptrack_1(parray *files, PGconn *backup_conn)
-{
- size_t i;
- Oid dbOid_with_ptrack_init = 0;
- Oid tblspcOid_with_ptrack_init = 0;
- char *ptrack_nonparsed = NULL;
- size_t ptrack_nonparsed_size = 0;
-
- for (i = 0; i < parray_num(files); i++)
- {
- pgFile *file = (pgFile *) parray_get(files, i);
- size_t start_addr;
-
- /*
- * If there is a ptrack_init file in the database,
- * we must backup all its files, ignoring ptrack files for relations.
- */
- if (file->is_database)
- {
- /*
- * The function pg_ptrack_get_and_clear_db returns true
- * if there was a ptrack_init file.
- * Also ignore ptrack files for global tablespace,
- * to avoid any possible specific errors.
- */
- if ((file->tblspcOid == GLOBALTABLESPACE_OID) ||
- pg_ptrack_get_and_clear_db(file->dbOid, file->tblspcOid, backup_conn))
- {
- dbOid_with_ptrack_init = file->dbOid;
- tblspcOid_with_ptrack_init = file->tblspcOid;
- }
- }
-
- if (file->is_datafile)
- {
- if (file->tblspcOid == tblspcOid_with_ptrack_init &&
- file->dbOid == dbOid_with_ptrack_init)
- {
- /* ignore ptrack if ptrack_init exists */
- elog(VERBOSE, "Ignoring ptrack because of ptrack_init for file: %s", file->rel_path);
- file->pagemap_isabsent = true;
- continue;
- }
-
- /* get ptrack bitmap once for all segments of the file */
- if (file->segno == 0)
- {
- /* release previous value */
- pg_free(ptrack_nonparsed);
- ptrack_nonparsed_size = 0;
-
- ptrack_nonparsed = pg_ptrack_get_and_clear(file->tblspcOid, file->dbOid,
- file->relOid, &ptrack_nonparsed_size, backup_conn);
- }
-
- if (ptrack_nonparsed != NULL)
- {
- /*
- * pg_ptrack_get_and_clear() returns ptrack with VARHDR cut out.
- * Compute the beginning of the ptrack map related to this segment
- *
- * HEAPBLOCKS_PER_BYTE. Number of heap pages one ptrack byte can track: 8
- * RELSEG_SIZE. Number of Pages per segment: 131072
- * RELSEG_SIZE/HEAPBLOCKS_PER_BYTE. number of bytes in ptrack file needed
- * to keep track on one relsegment: 16384
- */
- start_addr = (RELSEG_SIZE/HEAPBLOCKS_PER_BYTE)*file->segno;
-
- /*
- * If file segment was created after we have read ptrack,
- * we won't have a bitmap for this segment.
- */
- if (start_addr > ptrack_nonparsed_size)
- {
- elog(VERBOSE, "Ptrack is missing for file: %s", file->rel_path);
- file->pagemap_isabsent = true;
- }
- else
- {
-
- if (start_addr + RELSEG_SIZE/HEAPBLOCKS_PER_BYTE > ptrack_nonparsed_size)
- {
- file->pagemap.bitmapsize = ptrack_nonparsed_size - start_addr;
- elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
- }
- else
- {
- file->pagemap.bitmapsize = RELSEG_SIZE/HEAPBLOCKS_PER_BYTE;
- elog(VERBOSE, "pagemap size: %i", file->pagemap.bitmapsize);
- }
-
- file->pagemap.bitmap = pg_malloc(file->pagemap.bitmapsize);
- memcpy(file->pagemap.bitmap, ptrack_nonparsed+start_addr, file->pagemap.bitmapsize);
- }
- }
- else
- {
- /*
- * If ptrack file is missing, try to copy the entire file.
- * It can happen in two cases:
- * - files were created by commands that bypass buffer manager
- * and, correspondingly, ptrack mechanism.
- * i.e. CREATE DATABASE
- * - target relation was deleted.
- */
- elog(VERBOSE, "Ptrack is missing for file: %s", file->rel_path);
- file->pagemap_isabsent = true;
- }
- }
- }
-}
-
/*
* Parse a string like "2.1" into int
* result: int by formula major_number * 100 + minor_number
@@ -218,7 +100,7 @@ get_ptrack_version(PGconn *backup_conn, PGNodeInfo *nodeInfo)
nodeInfo->ptrack_version_num = ptrack_version_num;
/* ptrack 1.X is buggy, so fall back to DELTA backup strategy for safety */
- if (nodeInfo->ptrack_version_num >= 105 && nodeInfo->ptrack_version_num < 200)
+ if (nodeInfo->ptrack_version_num < 200)
{
if (current.backup_mode == BACKUP_MODE_DIFF_PTRACK)
{
@@ -241,12 +123,7 @@ pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num)
PGresult *res_db;
bool result = false;
- if (ptrack_version_num < 200)
- {
- res_db = pgut_execute(backup_conn, "SHOW ptrack_enable", 0, NULL);
- result = strcmp(PQgetvalue(res_db, 0, 0), "on") == 0;
- }
- else if (ptrack_version_num == 200)
+ if (ptrack_version_num == 200)
{
res_db = pgut_execute(backup_conn, "SHOW ptrack_map_size", 0, NULL);
result = strcmp(PQgetvalue(res_db, 0, 0), "0") != 0;
@@ -262,214 +139,6 @@ pg_ptrack_enable(PGconn *backup_conn, int ptrack_version_num)
return result;
}
-
-/* ----------------------------
- * Ptrack 1.* support functions
- * ----------------------------
- */
-
-/* Clear ptrack files in all databases of the instance we connected to */
-void
-pg_ptrack_clear(PGconn *backup_conn, int ptrack_version_num)
-{
- PGresult *res_db,
- *res;
- const char *dbname;
- int i;
- Oid dbOid, tblspcOid;
- char *params[2];
-
- // FIXME Perform this check on caller's side
- if (ptrack_version_num >= 200)
- return;
-
- params[0] = palloc(64);
- params[1] = palloc(64);
- res_db = pgut_execute(backup_conn, "SELECT datname, oid, dattablespace FROM pg_database",
- 0, NULL);
-
- for(i = 0; i < PQntuples(res_db); i++)
- {
- PGconn *tmp_conn;
-
- dbname = PQgetvalue(res_db, i, 0);
- if (strcmp(dbname, "template0") == 0)
- continue;
-
- dbOid = atoll(PQgetvalue(res_db, i, 1));
- tblspcOid = atoll(PQgetvalue(res_db, i, 2));
-
- tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport,
- dbname,
- instance_config.conn_opt.pguser);
-
- res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_clear()",
- 0, NULL);
- PQclear(res);
-
- sprintf(params[0], "%i", dbOid);
- sprintf(params[1], "%i", tblspcOid);
- res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
- 2, (const char **)params);
- PQclear(res);
-
- pgut_disconnect(tmp_conn);
- }
-
- pfree(params[0]);
- pfree(params[1]);
- PQclear(res_db);
-}
-
-bool
-pg_ptrack_get_and_clear_db(Oid dbOid, Oid tblspcOid, PGconn *backup_conn)
-{
- char *params[2];
- char *dbname;
- PGresult *res_db;
- PGresult *res;
- bool result;
-
- params[0] = palloc(64);
- params[1] = palloc(64);
-
- sprintf(params[0], "%i", dbOid);
- res_db = pgut_execute(backup_conn,
- "SELECT datname FROM pg_database WHERE oid=$1",
- 1, (const char **) params);
- /*
- * If database is not found, it's not an error.
- * It could have been deleted since previous backup.
- */
- if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
- return false;
-
- dbname = PQgetvalue(res_db, 0, 0);
-
- /* Always backup all files from template0 database */
- if (strcmp(dbname, "template0") == 0)
- {
- PQclear(res_db);
- return true;
- }
- PQclear(res_db);
-
- sprintf(params[0], "%i", dbOid);
- sprintf(params[1], "%i", tblspcOid);
- res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear_db($1, $2)",
- 2, (const char **)params);
-
- if (PQnfields(res) != 1)
- elog(ERROR, "cannot perform pg_ptrack_get_and_clear_db()");
-
- if (!parse_bool(PQgetvalue(res, 0, 0), &result))
- elog(ERROR,
- "result of pg_ptrack_get_and_clear_db() is invalid: %s",
- PQgetvalue(res, 0, 0));
-
- PQclear(res);
- pfree(params[0]);
- pfree(params[1]);
-
- return result;
-}
-
-/* Read and clear ptrack files of the target relation.
- * Result is a bytea ptrack map of all segments of the target relation.
- * case 1: we know a tablespace_oid, db_oid, and rel_filenode
- * case 2: we know db_oid and rel_filenode (no tablespace_oid, because file in pg_default)
- * case 3: we know only rel_filenode (because file in pg_global)
- */
-char *
-pg_ptrack_get_and_clear(Oid tablespace_oid, Oid db_oid, Oid rel_filenode,
- size_t *result_size, PGconn *backup_conn)
-{
- PGconn *tmp_conn;
- PGresult *res_db,
- *res;
- char *params[2];
- char *result;
- char *val;
-
- params[0] = palloc(64);
- params[1] = palloc(64);
-
- /* regular file (not in directory 'global') */
- if (db_oid != 0)
- {
- char *dbname;
-
- sprintf(params[0], "%i", db_oid);
- res_db = pgut_execute(backup_conn,
- "SELECT datname FROM pg_database WHERE oid=$1",
- 1, (const char **) params);
- /*
- * If database is not found, it's not an error.
- * It could have been deleted since previous backup.
- */
- if (PQntuples(res_db) != 1 || PQnfields(res_db) != 1)
- return NULL;
-
- dbname = PQgetvalue(res_db, 0, 0);
-
- if (strcmp(dbname, "template0") == 0)
- {
- PQclear(res_db);
- return NULL;
- }
-
- tmp_conn = pgut_connect(instance_config.conn_opt.pghost, instance_config.conn_opt.pgport,
- dbname,
- instance_config.conn_opt.pguser);
- sprintf(params[0], "%i", tablespace_oid);
- sprintf(params[1], "%i", rel_filenode);
- res = pgut_execute(tmp_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
- 2, (const char **)params);
-
- if (PQnfields(res) != 1)
- elog(ERROR, "cannot get ptrack file from database \"%s\" by tablespace oid %u and relation oid %u",
- dbname, tablespace_oid, rel_filenode);
- PQclear(res_db);
- pgut_disconnect(tmp_conn);
- }
- /* file in directory 'global' */
- else
- {
- /*
- * execute ptrack_get_and_clear for relation in pg_global
- * Use backup_conn, cause we can do it from any database.
- */
- sprintf(params[0], "%i", tablespace_oid);
- sprintf(params[1], "%i", rel_filenode);
- res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_get_and_clear($1, $2)",
- 2, (const char **)params);
-
- if (PQnfields(res) != 1)
- elog(ERROR, "cannot get ptrack file from pg_global tablespace and relation oid %u",
- rel_filenode);
- }
-
- val = PQgetvalue(res, 0, 0);
-
- /* TODO Now pg_ptrack_get_and_clear() returns bytea ending with \x.
- * It should be fixed in future ptrack releases, but till then we
- * can parse it.
- */
- if (strcmp("x", val+1) == 0)
- {
- /* Ptrack file is missing */
- return NULL;
- }
-
- result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
- result_size);
- PQclear(res);
- pfree(params[0]);
- pfree(params[1]);
-
- return result;
-}
-
/*
* Get lsn of the moment when ptrack was enabled the last time.
*/
@@ -482,20 +151,14 @@ get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo)
uint32 lsn_lo;
XLogRecPtr lsn;
- if (nodeInfo->ptrack_version_num < 200)
- res = pgut_execute(backup_conn, "SELECT pg_catalog.pg_ptrack_control_lsn()",
- 0, NULL);
- else
- {
- char query[128];
+ char query[128];
- if (nodeInfo->ptrack_version_num == 200)
- sprintf(query, "SELECT %s.pg_ptrack_control_lsn()", nodeInfo->ptrack_schema);
- else
- sprintf(query, "SELECT %s.ptrack_init_lsn()", nodeInfo->ptrack_schema);
+ if (nodeInfo->ptrack_version_num == 200)
+ sprintf(query, "SELECT %s.pg_ptrack_control_lsn()", nodeInfo->ptrack_schema);
+ else
+ sprintf(query, "SELECT %s.ptrack_init_lsn()", nodeInfo->ptrack_schema);
- res = pgut_execute(backup_conn, query, 0, NULL);
- }
+ res = pgut_execute(backup_conn, query, 0, NULL);
/* Extract timeline and LSN from results of pg_start_backup() */
XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo);
@@ -506,99 +169,6 @@ get_last_ptrack_lsn(PGconn *backup_conn, PGNodeInfo *nodeInfo)
return lsn;
}
-char *
-pg_ptrack_get_block(ConnectionArgs *arguments,
- Oid dbOid,
- Oid tblsOid,
- Oid relOid,
- BlockNumber blknum,
- size_t *result_size,
- int ptrack_version_num,
- const char *ptrack_schema)
-{
- PGresult *res;
- char *params[4];
- char *result;
-
- params[0] = palloc(64);
- params[1] = palloc(64);
- params[2] = palloc(64);
- params[3] = palloc(64);
-
- /*
- * Use tmp_conn, since we may work in parallel threads.
- * We can connect to any database.
- */
- sprintf(params[0], "%i", tblsOid);
- sprintf(params[1], "%i", dbOid);
- sprintf(params[2], "%i", relOid);
- sprintf(params[3], "%u", blknum);
-
- if (arguments->conn == NULL)
- {
- arguments->conn = pgut_connect(instance_config.conn_opt.pghost,
- instance_config.conn_opt.pgport,
- instance_config.conn_opt.pgdatabase,
- instance_config.conn_opt.pguser);
- }
-
- if (arguments->cancel_conn == NULL)
- arguments->cancel_conn = PQgetCancel(arguments->conn);
-
- // elog(LOG, "db %i pg_ptrack_get_block(%i, %i, %u)",dbOid, tblsOid, relOid, blknum);
-
- if (ptrack_version_num < 200)
- res = pgut_execute_parallel(arguments->conn,
- arguments->cancel_conn,
- "SELECT pg_catalog.pg_ptrack_get_block_2($1, $2, $3, $4)",
- 4, (const char **)params, true, false, false);
- else
- {
- char query[128];
-
- /* sanity */
- if (!ptrack_schema)
- elog(ERROR, "Schema name of ptrack extension is missing");
-
- if (ptrack_version_num == 200)
- sprintf(query, "SELECT %s.pg_ptrack_get_block($1, $2, $3, $4)", ptrack_schema);
- else
- elog(ERROR, "ptrack >= 2.1.0 does not support pg_ptrack_get_block()");
- // sprintf(query, "SELECT %s.ptrack_get_block($1, $2, $3, $4)", ptrack_schema);
-
- res = pgut_execute_parallel(arguments->conn,
- arguments->cancel_conn,
- query, 4, (const char **)params,
- true, false, false);
- }
-
- if (PQnfields(res) != 1)
- {
- elog(VERBOSE, "cannot get file block for relation oid %u",
- relOid);
- return NULL;
- }
-
- if (PQgetisnull(res, 0, 0))
- {
- elog(VERBOSE, "cannot get file block for relation oid %u",
- relOid);
- return NULL;
- }
-
- result = (char *) PQunescapeBytea((unsigned char *) PQgetvalue(res, 0, 0),
- result_size);
-
- PQclear(res);
-
- pfree(params[0]);
- pfree(params[1]);
- pfree(params[2]);
- pfree(params[3]);
-
- return result;
-}
-
/* ----------------------------
* Ptrack 2.* support functions
* ----------------------------
diff --git a/src/utils/file.c b/src/utils/file.c
index f341b7a37..389d4d113 100644
--- a/src/utils/file.c
+++ b/src/utils/file.c
@@ -48,7 +48,6 @@ typedef struct
size_t size;
time_t mtime;
bool is_datafile;
- bool is_database;
Oid tblspcOid;
Oid dbOid;
Oid relOid;
@@ -2574,7 +2573,6 @@ fio_list_dir_internal(parray *files, const char *root, bool exclude,
file->size = fio_file.size;
file->mtime = fio_file.mtime;
file->is_datafile = fio_file.is_datafile;
- file->is_database = fio_file.is_database;
file->tblspcOid = fio_file.tblspcOid;
file->dbOid = fio_file.dbOid;
file->relOid = fio_file.relOid;
@@ -2648,7 +2646,6 @@ fio_list_dir_impl(int out, char* buf)
fio_file.size = file->size;
fio_file.mtime = file->mtime;
fio_file.is_datafile = file->is_datafile;
- fio_file.is_database = file->is_database;
fio_file.tblspcOid = file->tblspcOid;
fio_file.dbOid = file->dbOid;
fio_file.relOid = file->relOid;
diff --git a/tests/auth_test.py b/tests/auth_test.py
index c84fdb981..78af21be9 100644
--- a/tests/auth_test.py
+++ b/tests/auth_test.py
@@ -190,9 +190,7 @@ def setUpClass(cls):
"GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; "
"GRANT EXECUTE ON FUNCTION txid_current() TO backup; "
"GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; "
- "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup; "
- "GRANT EXECUTE ON FUNCTION pg_ptrack_clear() TO backup; "
- "GRANT EXECUTE ON FUNCTION pg_ptrack_get_and_clear(oid, oid) TO backup;")
+ "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;")
cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass')
@classmethod
diff --git a/tests/backup.py b/tests/backup.py
index 53790ad03..8c537dbc3 100644
--- a/tests/backup.py
+++ b/tests/backup.py
@@ -2106,6 +2106,7 @@ def test_backup_with_least_privileges_role(self):
if self.ptrack:
if node.major_version < 12:
+            # TODO: consider skipping this branch entirely — ptrack 1.x (PG < 12) is deprecated and unsupported
for fname in [
'pg_catalog.oideq(oid, oid)',
'pg_catalog.ptrack_version()',
diff --git a/tests/false_positive.py b/tests/false_positive.py
index d4e7ccf0d..a101f8107 100644
--- a/tests/false_positive.py
+++ b/tests/false_positive.py
@@ -107,192 +107,6 @@ def test_incremental_backup_corrupt_full_1(self):
# Clean after yourself
self.del_test_dir(module_name, fname)
- @unittest.expectedFailure
- def test_ptrack_concurrent_get_and_clear_1(self):
- """make node, make full and ptrack stream backups,"
- " restore them and check data correctness"""
-
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
- node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
- set_replication=True,
- ptrack_enable=True,
- initdb_params=['--data-checksums'])
-
- self.init_pb(backup_dir)
- self.add_instance(backup_dir, 'node', node)
- self.set_archiving(backup_dir, 'node', node)
- node.slow_start()
-
- node.safe_psql(
- "postgres",
- "create table t_heap as select i"
- " as id from generate_series(0,1) i"
- )
-
- self.backup_node(backup_dir, 'node', node, options=['--stream'])
- gdb = self.backup_node(
- backup_dir, 'node', node, backup_type='ptrack',
- options=['--stream'],
- gdb=True
- )
-
- gdb.set_breakpoint('make_pagemap_from_ptrack')
- gdb.run_until_break()
-
- node.safe_psql(
- "postgres",
- "update t_heap set id = 100500")
-
- tablespace_oid = node.safe_psql(
- "postgres",
- "select oid from pg_tablespace where spcname = 'pg_default'").rstrip()
-
- relfilenode = node.safe_psql(
- "postgres",
- "select 't_heap'::regclass::oid").rstrip()
-
- node.safe_psql(
- "postgres",
- "SELECT pg_ptrack_get_and_clear({0}, {1})".format(
- tablespace_oid, relfilenode))
-
- gdb.continue_execution_until_exit()
-
- self.backup_node(
- backup_dir, 'node', node,
- backup_type='ptrack', options=['--stream']
- )
- if self.paranoia:
- pgdata = self.pgdata_content(node.data_dir)
-
- result = node.safe_psql("postgres", "SELECT * FROM t_heap")
- node.cleanup()
- self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
-
- # Physical comparison
- if self.paranoia:
- pgdata_restored = self.pgdata_content(
- node.data_dir, ignore_ptrack=False)
- self.compare_pgdata(pgdata, pgdata_restored)
-
- node.slow_start()
- # Logical comparison
- self.assertEqual(
- result,
- node.safe_psql("postgres", "SELECT * FROM t_heap"))
-
- # Clean after yourself
- self.del_test_dir(module_name, fname)
-
- @unittest.expectedFailure
- def test_ptrack_concurrent_get_and_clear_2(self):
- """make node, make full and ptrack stream backups,"
- " restore them and check data correctness"""
-
- if not self.ptrack:
- return unittest.skip('Skipped because ptrack support is disabled')
-
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
- node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
- set_replication=True,
- ptrack_enable=True,
- initdb_params=['--data-checksums'])
-
- self.init_pb(backup_dir)
- self.add_instance(backup_dir, 'node', node)
- self.set_archiving(backup_dir, 'node', node)
- node.slow_start()
-
- node.safe_psql(
- "postgres",
- "create table t_heap as select i"
- " as id from generate_series(0,1) i"
- )
-
- self.backup_node(backup_dir, 'node', node, options=['--stream'])
- gdb = self.backup_node(
- backup_dir, 'node', node, backup_type='ptrack',
- options=['--stream'],
- gdb=True
- )
-
- gdb.set_breakpoint('pthread_create')
- gdb.run_until_break()
-
- node.safe_psql(
- "postgres",
- "update t_heap set id = 100500")
-
- tablespace_oid = node.safe_psql(
- "postgres",
- "select oid from pg_tablespace "
- "where spcname = 'pg_default'").rstrip()
-
- relfilenode = node.safe_psql(
- "postgres",
- "select 't_heap'::regclass::oid").rstrip()
-
- node.safe_psql(
- "postgres",
- "SELECT pg_ptrack_get_and_clear({0}, {1})".format(
- tablespace_oid, relfilenode))
-
- gdb._execute("delete breakpoints")
- gdb.continue_execution_until_exit()
-
- try:
- self.backup_node(
- backup_dir, 'node', node,
- backup_type='ptrack', options=['--stream']
- )
- # we should die here because exception is what we expect to happen
- self.assertEqual(
- 1, 0,
- "Expecting Error because of LSN mismatch from ptrack_control "
- "and previous backup ptrack_lsn.\n"
- " Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd))
- except ProbackupException as e:
- self.assertTrue(
- 'ERROR: LSN from ptrack_control' in e.message,
- '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
- repr(e.message), self.cmd))
-
- if self.paranoia:
- pgdata = self.pgdata_content(node.data_dir)
-
- result = node.safe_psql("postgres", "SELECT * FROM t_heap")
- node.cleanup()
- self.restore_node(backup_dir, 'node', node, options=["-j", "4"])
-
- # Physical comparison
- if self.paranoia:
- pgdata_restored = self.pgdata_content(
- node.data_dir, ignore_ptrack=False)
- self.compare_pgdata(pgdata, pgdata_restored)
-
- node.slow_start()
- # Logical comparison
- self.assertEqual(
- result,
- node.safe_psql("postgres", "SELECT * FROM t_heap")
- )
-
- # Clean after yourself
- self.del_test_dir(module_name, fname)
-
# @unittest.skip("skip")
@unittest.expectedFailure
def test_pg_10_waldir(self):
diff --git a/tests/ptrack.py b/tests/ptrack.py
index 011f8754a..fb530a691 100644
--- a/tests/ptrack.py
+++ b/tests/ptrack.py
@@ -14,6 +14,10 @@
class PtrackTest(ProbackupTest, unittest.TestCase):
+ def setUp(self):
+        if self.pg_config_version < self.version_to_num('11.0'):
+            self.skipTest('You need PostgreSQL >= 11 for this test')
+ self.fname = self.id().split('.')[3]
# @unittest.skip("skip")
def test_ptrack_stop_pg(self):
@@ -22,10 +26,9 @@ def test_ptrack_stop_pg(self):
restart node, check that ptrack backup
can be taken
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -52,7 +55,7 @@ def test_ptrack_stop_pg(self):
backup_type='ptrack', options=['--stream'])
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_multi_timeline_backup(self):
@@ -60,10 +63,9 @@ def test_ptrack_multi_timeline_backup(self):
t2 /------P2
t1 ------F---*-----P1
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -130,7 +132,7 @@ def test_ptrack_multi_timeline_backup(self):
self.assertEqual('0', balance)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_multi_timeline_backup_1(self):
@@ -142,10 +144,9 @@ def test_ptrack_multi_timeline_backup_1(self):
t2 /------P2
t1 ---F--------*
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -206,17 +207,16 @@ def test_ptrack_multi_timeline_backup_1(self):
self.assertEqual('0', balance)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_eat_my_data(self):
"""
PGPRO-4051
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -236,7 +236,7 @@ def test_ptrack_eat_my_data(self):
self.backup_node(backup_dir, 'node', node)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum'])
@@ -287,16 +287,15 @@ def test_ptrack_eat_my_data(self):
'Data loss')
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_simple(self):
"""make node, make full and ptrack stream backups,"
" restore them and check data correctness"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -335,7 +334,7 @@ def test_ptrack_simple(self):
result = node.safe_psql("postgres", "SELECT * FROM t_heap")
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -358,15 +357,14 @@ def test_ptrack_simple(self):
node_restored.safe_psql("postgres", "SELECT * FROM t_heap"))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_unprivileged(self):
""""""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -484,7 +482,8 @@ def test_ptrack_unprivileged(self):
)
if node.major_version < 11:
- fnames = [
+            # TODO: consider skipping this branch entirely — ptrack 1.x (PG < 12) is deprecated and unsupported
+            fnames = [
 'pg_catalog.oideq(oid, oid)',
 'pg_catalog.ptrack_version()',
 'pg_catalog.pg_ptrack_clear()',
@@ -494,7 +493,7 @@ def test_ptrack_unprivileged(self):
 'pg_catalog.pg_ptrack_get_block_2(oid, oid, oid, bigint)'
 ]
-            for fname in fnames:
+            for fname in fnames:
node.safe_psql(
"backupdb",
"GRANT EXECUTE ON FUNCTION {0} TO backup".format(fname))
@@ -536,10 +535,9 @@ def test_ptrack_unprivileged(self):
# @unittest.expectedFailure
def test_ptrack_enable(self):
"""make ptrack without full backup, should result in error"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True, initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30s',
@@ -577,7 +575,7 @@ def test_ptrack_enable(self):
)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
@@ -587,10 +585,9 @@ def test_ptrack_disable(self):
enable ptrack, restart postgresql, take ptrack backup
which should fail
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -650,15 +647,14 @@ def test_ptrack_disable(self):
)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_uncommitted_xact(self):
"""make ptrack backup while there is uncommitted open transaction"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -689,7 +685,7 @@ def test_ptrack_uncommitted_xact(self):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -710,16 +706,15 @@ def test_ptrack_uncommitted_xact(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_full(self):
"""make node, make full and ptrack stream backups,
restore them and check data correctness"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -773,7 +768,7 @@ def test_ptrack_vacuum_full(self):
process.join()
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
old_tablespace = self.get_tblspace_path(node, 'somedata')
@@ -797,7 +792,7 @@ def test_ptrack_vacuum_full(self):
node_restored.slow_start()
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_truncate(self):
@@ -805,10 +800,9 @@ def test_ptrack_vacuum_truncate(self):
delete last 3 pages, vacuum relation,
take ptrack backup, take second ptrack backup,
restore last ptrack backup and check data correctness"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -856,7 +850,7 @@ def test_ptrack_vacuum_truncate(self):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
old_tablespace = self.get_tblspace_path(node, 'somedata')
@@ -882,16 +876,17 @@ def test_ptrack_vacuum_truncate(self):
node_restored.slow_start()
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_get_block(self):
- """make node, make full and ptrack stream backups,"
- " restore them and check data correctness"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ """
+ make node, make full and ptrack stream backups,
+ restore them and check data correctness
+ """
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -900,11 +895,9 @@ def test_ptrack_get_block(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- self.skipTest("skip --- we do not need ptrack_get_block for ptrack 2.*")
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
node.safe_psql(
"postgres",
@@ -917,10 +910,7 @@ def test_ptrack_get_block(self):
options=['--stream'],
gdb=True)
- if node.major_version > 11:
- gdb.set_breakpoint('make_pagemap_from_ptrack_2')
- else:
- gdb.set_breakpoint('make_pagemap_from_ptrack_1')
+ gdb.set_breakpoint('make_pagemap_from_ptrack_2')
gdb.run_until_break()
node.safe_psql(
@@ -950,21 +940,18 @@ def test_ptrack_get_block(self):
# Logical comparison
self.assertEqual(
result,
- node.safe_psql("postgres", "SELECT * FROM t_heap")
- )
+ node.safe_psql("postgres", "SELECT * FROM t_heap"))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_stream(self):
"""make node, make full and ptrack stream backups,
restore them and check data correctness"""
- self.maxDiff = None
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -975,10 +962,9 @@ def test_ptrack_stream(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
node.safe_psql("postgres", "create sequence t_seq")
@@ -1045,17 +1031,15 @@ def test_ptrack_stream(self):
self.assertEqual(ptrack_result, ptrack_result_new)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_archive(self):
"""make archive node, make full and ptrack backups,
check data correctness in restored instance"""
- self.maxDiff = None
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1067,10 +1051,9 @@ def test_ptrack_archive(self):
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
node.safe_psql(
@@ -1158,20 +1141,18 @@ def test_ptrack_archive(self):
node.cleanup()
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
+ @unittest.skip("skip")
def test_ptrack_pgpro417(self):
- """Make node, take full backup, take ptrack backup,
- delete ptrack backup. Try to take ptrack backup,
- which should fail. Actual only for PTRACK 1.x"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ """
+ Make node, take full backup, take ptrack backup,
+ delete ptrack backup. Try to take ptrack backup,
+ which should fail. Actual only for PTRACK 1.x
+ """
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1238,22 +1219,18 @@ def test_ptrack_pgpro417(self):
repr(e.message), self.cmd))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
+ @unittest.skip("skip")
def test_page_pgpro417(self):
"""
Make archive node, take full backup, take page backup,
delete page backup. Try to take ptrack backup, which should fail.
Actual only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1308,22 +1285,18 @@ def test_page_pgpro417(self):
repr(e.message), self.cmd))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
+ @unittest.skip("skip")
def test_full_pgpro417(self):
"""
Make node, take two full backups, delete full second backup.
Try to take ptrack backup, which should fail.
Relevant only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1384,7 +1357,7 @@ def test_full_pgpro417(self):
repr(e.message), self.cmd))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_create_db(self):
@@ -1392,10 +1365,9 @@ def test_create_db(self):
Make node, take full backup, create database db1, take ptrack backup,
restore database and check it presense
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1406,10 +1378,9 @@ def test_create_db(self):
self.add_instance(backup_dir, 'node', node)
node.slow_start()
- if node.major_version >= 11:
- node.safe_psql(
- "postgres",
- "CREATE EXTENSION ptrack")
+ node.safe_psql(
+ "postgres",
+ "CREATE EXTENSION ptrack")
# FULL BACKUP
node.safe_psql(
@@ -1439,7 +1410,7 @@ def test_create_db(self):
# RESTORE
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -1501,7 +1472,7 @@ def test_create_db(self):
repr(e.message), self.cmd))
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_create_db_on_replica(self):
@@ -1511,10 +1482,9 @@ def test_create_db_on_replica(self):
create database db1, take ptrack backup from replica,
restore database and check it presense
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1537,7 +1507,7 @@ def test_create_db_on_replica(self):
"md5(i::text)::tsvector as tsvector from generate_series(0,100) i")
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.backup_node(
@@ -1590,7 +1560,7 @@ def test_create_db_on_replica(self):
# RESTORE
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -1604,16 +1574,15 @@ def test_create_db_on_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_alter_table_set_tablespace_ptrack(self):
"""Make node, create tablespace with table, take full backup,
alter tablespace location, take ptrack backup, restore database."""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1661,7 +1630,7 @@ def test_alter_table_set_tablespace_ptrack(self):
# RESTORE
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -1696,17 +1665,16 @@ def test_alter_table_set_tablespace_ptrack(self):
# self.assertEqual(result, result_new, 'lost some data after restore')
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_alter_database_set_tablespace_ptrack(self):
"""Make node, create tablespace with database,"
" take full backup, alter tablespace location,"
" take ptrack backup, restore database."""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1744,7 +1712,7 @@ def test_alter_database_set_tablespace_ptrack(self):
# RESTORE
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
backup_dir, 'node',
@@ -1766,7 +1734,7 @@ def test_alter_database_set_tablespace_ptrack(self):
node_restored.slow_start()
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_drop_tablespace(self):
@@ -1774,10 +1742,9 @@ def test_drop_tablespace(self):
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1862,7 +1829,7 @@ def test_drop_tablespace(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_alter_tablespace(self):
@@ -1870,10 +1837,9 @@ def test_ptrack_alter_tablespace(self):
Make node, create table, alter table tablespace, take ptrack backup,
move table from tablespace, take ptrack backup
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -1920,7 +1886,7 @@ def test_ptrack_alter_tablespace(self):
# Restore ptrack backup
restored_node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'restored_node'))
+ base_dir=os.path.join(module_name, self.fname, 'restored_node'))
restored_node.cleanup()
tblspc_path_new = self.get_tblspace_path(
restored_node, 'somedata_restored')
@@ -1979,7 +1945,7 @@ def test_ptrack_alter_tablespace(self):
self.assertEqual(result, result_new)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_multiple_segments(self):
@@ -1987,10 +1953,9 @@ def test_ptrack_multiple_segments(self):
Make node, create table, alter table tablespace,
take ptrack backup, move table from tablespace, take ptrack backup
"""
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -2056,7 +2021,7 @@ def test_ptrack_multiple_segments(self):
# RESTORE NODE
restored_node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'restored_node'))
+ base_dir=os.path.join(module_name, self.fname, 'restored_node'))
restored_node.cleanup()
tblspc_path = self.get_tblspace_path(node, 'somedata')
tblspc_path_new = self.get_tblspace_path(
@@ -2090,28 +2055,23 @@ def test_ptrack_multiple_segments(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
- # @unittest.expectedFailure
+ @unittest.skip("skip")
def test_atexit_fail(self):
"""
Take backups of every available types and check that PTRACK is clean.
Relevant only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
pg_options={
'max_connections': '15'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -2147,26 +2107,22 @@ def test_atexit_fail(self):
"f")
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
+ @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_clean(self):
"""
Take backups of every available types and check that PTRACK is clean
Relevant only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -2259,29 +2215,24 @@ def test_ptrack_clean(self):
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
- # @unittest.expectedFailure
+ @unittest.skip("skip")
def test_ptrack_clean_replica(self):
"""
Take backups of every available types from
master and check that PTRACK on replica is clean.
Relevant only for PTRACK 1.x
"""
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
pg_options={
'archive_timeout': '30s'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -2289,7 +2240,7 @@ def test_ptrack_clean_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -2402,18 +2353,17 @@ def test_ptrack_clean_replica(self):
self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size'])
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_cluster_on_btree(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -2467,18 +2417,17 @@ def test_ptrack_cluster_on_btree(self):
self.check_ptrack_map_sanity(node, idx_ptrack)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -2540,18 +2489,17 @@ def test_ptrack_cluster_on_gist(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_btree_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -2564,7 +2512,7 @@ def test_ptrack_cluster_on_btree_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -2628,7 +2576,7 @@ def test_ptrack_cluster_on_btree_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'))
+ base_dir=os.path.join(module_name, self.fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', node)
@@ -2637,17 +2585,16 @@ def test_ptrack_cluster_on_btree_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_cluster_on_gist_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True)
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -2660,7 +2607,7 @@ def test_ptrack_cluster_on_gist_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -2730,7 +2677,7 @@ def test_ptrack_cluster_on_gist_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'))
+ base_dir=os.path.join(module_name, self.fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', node)
@@ -2740,20 +2687,19 @@ def test_ptrack_cluster_on_gist_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_empty(self):
"""Take backups of every available types and check that PTRACK is clean"""
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -2792,7 +2738,7 @@ def test_ptrack_empty(self):
node.safe_psql('postgres', 'checkpoint')
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
tblspace1 = self.get_tblspace_path(node, 'somedata')
@@ -2818,7 +2764,7 @@ def test_ptrack_empty(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
@@ -2827,14 +2773,13 @@ def test_ptrack_empty_replica(self):
Take backups of every available types from master
and check that PTRACK on replica is clean
"""
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
initdb_params=['--data-checksums'],
ptrack_enable=True)
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -2847,7 +2792,7 @@ def test_ptrack_empty_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -2903,7 +2848,7 @@ def test_ptrack_empty_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -2915,19 +2860,18 @@ def test_ptrack_empty_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_truncate(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -2998,13 +2942,12 @@ def test_ptrack_truncate(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_basic_ptrack_truncate_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -3013,7 +2956,7 @@ def test_basic_ptrack_truncate_replica(self):
'archive_timeout': '10s',
'checkpoint_timeout': '5min'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3026,7 +2969,7 @@ def test_basic_ptrack_truncate_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3108,7 +3051,7 @@ def test_basic_ptrack_truncate_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'))
+ base_dir=os.path.join(module_name, self.fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir)
@@ -3127,19 +3070,18 @@ def test_basic_ptrack_truncate_replica(self):
'select 1')
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -3215,20 +3157,19 @@ def test_ptrack_vacuum(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
pg_options={
'checkpoint_timeout': '30'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3241,7 +3182,7 @@ def test_ptrack_vacuum_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3314,7 +3255,7 @@ def test_ptrack_vacuum_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'))
+ base_dir=os.path.join(module_name, self.fname, 'node'))
node.cleanup()
self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir)
@@ -3323,19 +3264,18 @@ def test_ptrack_vacuum_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_bits_frozen(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -3403,18 +3343,17 @@ def test_ptrack_vacuum_bits_frozen(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_ptrack_vacuum_bits_frozen_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3427,7 +3366,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3502,19 +3441,18 @@ def test_ptrack_vacuum_bits_frozen_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_bits_visibility(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -3582,18 +3520,17 @@ def test_ptrack_vacuum_bits_visibility(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True)
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -3661,19 +3598,18 @@ def test_ptrack_vacuum_full(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_full_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3685,7 +3621,7 @@ def test_ptrack_vacuum_full_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3763,19 +3699,18 @@ def test_ptrack_vacuum_full_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -3832,7 +3767,7 @@ def test_ptrack_vacuum_truncate(self):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(backup_dir, 'node', node_restored)
@@ -3841,19 +3776,18 @@ def test_ptrack_vacuum_truncate(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_vacuum_truncate_replica(self):
- fname = self.id().split('.')[3]
master = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'master'),
+ base_dir=os.path.join(module_name, self.fname, 'master'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'master', master)
master.slow_start()
@@ -3866,7 +3800,7 @@ def test_ptrack_vacuum_truncate_replica(self):
self.backup_node(backup_dir, 'master', master, options=['--stream'])
replica = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'replica'))
+ base_dir=os.path.join(module_name, self.fname, 'replica'))
replica.cleanup()
self.restore_node(backup_dir, 'master', replica)
@@ -3938,7 +3872,7 @@ def test_ptrack_vacuum_truncate_replica(self):
pgdata = self.pgdata_content(replica.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(backup_dir, 'replica', node_restored)
@@ -3947,22 +3881,21 @@ def test_ptrack_vacuum_truncate_replica(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
- # @unittest.skip("skip")
- # @unittest.expectedFailure
+ @unittest.skip("skip")
def test_ptrack_recovery(self):
- if self.pg_config_version > self.version_to_num('11.0'):
- return unittest.skip('You need PostgreSQL =< 11 for this test')
-
- fname = self.id().split('.')[3]
+ """
+ Check that ptrack map contain correct bits after recovery.
+ Actual only for PTRACK 1.x
+ """
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -4009,17 +3942,13 @@ def test_ptrack_recovery(self):
self.check_ptrack_recovery(idx_ptrack[i])
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_recovery_1(self):
- if self.pg_config_version < self.version_to_num('12.0'):
- return unittest.skip('You need PostgreSQL >= 12 for this test')
-
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -4027,7 +3956,7 @@ def test_ptrack_recovery_1(self):
'shared_buffers': '512MB',
'max_wal_size': '3GB'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -4067,9 +3996,9 @@ def test_ptrack_recovery_1(self):
'postgres',
"create extension pg_buffercache")
- print(node.safe_psql(
- 'postgres',
- "SELECT count(*) FROM pg_buffercache WHERE isdirty"))
+ #print(node.safe_psql(
+ # 'postgres',
+ # "SELECT count(*) FROM pg_buffercache WHERE isdirty"))
if self.verbose:
print('Killing postmaster. Losing Ptrack changes')
@@ -4088,7 +4017,7 @@ def test_ptrack_recovery_1(self):
pgdata = self.pgdata_content(node.data_dir)
node_restored = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node_restored'))
+ base_dir=os.path.join(module_name, self.fname, 'node_restored'))
node_restored.cleanup()
self.restore_node(
@@ -4098,19 +4027,18 @@ def test_ptrack_recovery_1(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_zero_changes(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -4144,14 +4072,13 @@ def test_ptrack_zero_changes(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_ptrack_pg_resetxlog(self):
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'],
@@ -4159,7 +4086,7 @@ def test_ptrack_pg_resetxlog(self):
'shared_buffers': '512MB',
'max_wal_size': '3GB'})
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -4259,7 +4186,7 @@ def test_ptrack_pg_resetxlog(self):
# pgdata = self.pgdata_content(node.data_dir)
#
# node_restored = self.make_simple_node(
-# base_dir=os.path.join(module_name, fname, 'node_restored'))
+# base_dir=os.path.join(module_name, self.fname, 'node_restored'))
# node_restored.cleanup()
#
# self.restore_node(
@@ -4269,23 +4196,18 @@ def test_ptrack_pg_resetxlog(self):
# self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
def test_corrupt_ptrack_map(self):
-
- if self.pg_config_version < self.version_to_num('12.0'):
- return unittest.skip('You need PostgreSQL >= 12 for this test')
-
- fname = self.id().split('.')[3]
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
node.slow_start()
@@ -4388,11 +4310,8 @@ def test_corrupt_ptrack_map(self):
node.stop(['-m', 'immediate', '-D', node.data_dir])
self.set_auto_conf(node, {'ptrack.map_size': '32', 'shared_preload_libraries': 'ptrack'})
-
node.slow_start()
- sleep(1)
-
try:
self.backup_node(
backup_dir, 'node', node,
@@ -4410,8 +4329,6 @@ def test_corrupt_ptrack_map(self):
'\n Unexpected Error Message: {0}\n'
' CMD: {1}'.format(repr(e.message), self.cmd))
- sleep(1)
-
self.backup_node(
backup_dir, 'node', node,
backup_type='delta', options=['--stream'])
@@ -4435,26 +4352,21 @@ def test_corrupt_ptrack_map(self):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
# @unittest.skip("skip")
def test_horizon_lsn_ptrack(self):
"""
https://github.com/postgrespro/pg_probackup/pull/386
"""
-
- if self.pg_config_version < self.version_to_num('11.0'):
- return unittest.skip("You need PostgreSQL >= 11 for this test")
-
self.assertLessEqual(
self.version_to_num(self.old_probackup_version),
self.version_to_num('2.4.15'),
'You need pg_probackup old_binary =< 2.4.15 for this test')
- fname = self.id().split('.')[3]
- backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+ backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup')
node = self.make_simple_node(
- base_dir=os.path.join(module_name, fname, 'node'),
+ base_dir=os.path.join(module_name, self.fname, 'node'),
set_replication=True,
ptrack_enable=True,
initdb_params=['--data-checksums'])
@@ -4510,4 +4422,4 @@ def test_horizon_lsn_ptrack(self):
self.assertEqual(delta_bytes, ptrack_bytes)
# Clean after yourself
- self.del_test_dir(module_name, fname)
+ self.del_test_dir(module_name, self.fname)
diff --git a/tests/restore.py b/tests/restore.py
index 61aae9285..cc45a3a1a 100644
--- a/tests/restore.py
+++ b/tests/restore.py
@@ -3299,6 +3299,7 @@ def test_missing_database_map(self):
if self.ptrack:
fnames = []
if node.major_version < 12:
+ # NOTE(review): should this test be skipped entirely when the deprecated pre-2.0 PTRACK is in use?
fnames += [
'pg_catalog.oideq(oid, oid)',
'pg_catalog.ptrack_version()',
@@ -3313,7 +3314,6 @@ def test_missing_database_map(self):
# fnames += [
# 'pg_ptrack_get_pagemapset(pg_lsn)',
# 'pg_ptrack_control_lsn()',
-# 'pg_ptrack_get_block(oid, oid, oid, bigint)'
# ]
node.safe_psql(
"backupdb",