diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 6f99d0f27..c3ad89568 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -72,8 +72,7 @@ jobs:
       - name: Install Testgres
         run: |
           git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git
-          cd testgres
-          python setup.py install
+          pip3 install psycopg2 ./testgres
 
       # Grant the Github runner user full control of the workspace for initdb to successfully process the data folder
       - name: Test Probackup
diff --git a/LICENSE b/LICENSE
index 0ba831507..66476e8a9 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2015-2020, Postgres Professional
+Copyright (c) 2015-2023, Postgres Professional
 Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
 Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
diff --git a/README.md b/README.md
index 7486a6ca6..2279b97a4 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@
 `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure.
 The utility is compatible with:
-* PostgreSQL 9.6, 10, 11, 12, 13, 14, 15;
+* PostgreSQL 11, 12, 13, 14, 15, 16
 
 As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data:
 * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow.
@@ -41,9 +41,9 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp
 ## ptrack support
 
 `PTRACK` backup support is provided via the following options:
-* vanilla PostgreSQL 11, 12, 13, 14, 15 with [ptrack extension](https://github.com/postgrespro/ptrack)
-* Postgres Pro Standard 11, 12, 13, 14
-* Postgres Pro Enterprise 11, 12, 13, 14
+* vanilla PostgreSQL 11, 12, 13, 14, 15, 16 with [ptrack extension](https://github.com/postgrespro/ptrack)
+* Postgres Pro Standard 11, 12, 13, 14, 15, 16
+* Postgres Pro Enterprise 11, 12, 13, 14, 15, 16
 
 ## Limitations
 
@@ -69,117 +69,12 @@ For detailed release plans check [Milestones](https://github.com/postgrespro/pg_
 
 Installers are available in release **assets**. [Latest](https://github.com/postgrespro/pg_probackup/releases/latest).
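For `PTRACK` backups on vanilla PostgreSQL, the [ptrack extension](https://github.com/postgrespro/ptrack) has to be enabled in the cluster first. A minimal sketch, assuming the extension is already built and installed on the server; the `ptrack.map_size` value and the restart command are illustrative assumptions that depend on your setup:

```shell
# Load ptrack and allocate its change-tracking map (size in MB), then restart PostgreSQL
psql -U postgres -c "ALTER SYSTEM SET shared_preload_libraries = 'ptrack'"
psql -U postgres -c "ALTER SYSTEM SET ptrack.map_size = 64"
sudo systemctl restart postgresql   # restart command depends on your distribution

# Create the extension in the database used for backup connections
psql -U postgres -c "CREATE EXTENSION ptrack"
```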
### Linux Installation -#### pg_probackup for vanilla PostgreSQL -```shell -#DEB Ubuntu|Debian Packages -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11,10} -sudo apt-get install pg-probackup-{15,14,13,12,11,10}-dbg - -#DEB-SRC Packages -sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ - /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{15,14,13,12,11,10} - -#DEB Astra Linix Orel -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{15,14,13,12,11,10}{-dbg,} - -#RPM Centos Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo - -#RPM RHEL Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo - -#RPM Oracle Linux Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{15,14,13,12,11,10} -yum install pg_probackup-{15,14,13,12,11,10}-debuginfo - -#SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{15,14,13,12,11,10} - -#RPM SUSE|SLES Packages -zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{15,14,13,12,11,10} -zypper install pg_probackup-{15,14,13,12,11,10}-debuginfo - -#SRPM SUSE|SLES Packages -zypper si pg_probackup-{15,14,13,12,11,10} - -#RPM ALT Linux 8 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo - -#RPM ALT Linux 9 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo - -#RPM ALT Linux 10 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p10 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{15,14,13,12,11,10} -sudo apt-get install pg_probackup-{15,14,13,12,11,10}-debuginfo -``` -#### pg_probackup for PostgresPro Standard and Enterprise -```shell -#DEB Ubuntu|Debian Packages -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' -sudo wget -O - 
https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update
-sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6}
-sudo apt-get install pg-probackup-{std,ent}-{14,13,12,11,10,9.6}-dbg
-
-#DEB Astra Linix Orel
-sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list'
-sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update
-sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,}
-
-
-#RPM Centos Packages
-rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm
-yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}
-yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo
-
-#RPM RHEL Packages
-rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm
-yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}
-yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo
-
-#RPM Oracle Linux Packages
-rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm
-yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}
-yum install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo
-
-#RPM ALT Linux 7
-sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list'
-sudo apt-get update
-sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}
-sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo
-
-#RPM ALT Linux 8
-sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list'
-sudo apt-get update
-sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}
-sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo
-
-#RPM ALT Linux 9
-sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update
-sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}
-sudo apt-get install pg_probackup-{std,ent}-{14,13,12,11,10,9.6}-debuginfo
-```
+See the [Installation](https://postgrespro.github.io/pg_probackup/#pbk-install) section in the documentation.
+
+Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-setup).
 
-Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup).
+For users of Postgres Pro products, commercial editions of pg_probackup are available for installation from the corresponding Postgres Pro product repository.
 
 ## Building from source
 ### Linux
 
@@ -199,7 +94,7 @@ cd && git clone https://github.com/postgrespro/
 
 ### Windows
 
 Currently pg_probackup can be built using only MSVC 2013.
-Build PostgreSQL using [pgwininstall](https://github.com/postgrespro/pgwininstall) or [PostgreSQL instruction](https://www.postgresql.org/docs/10/install-windows-full.html) with MSVC 2013.
+Build PostgreSQL using [pgwininstall](https://github.com/postgrespro/pgwininstall) or [PostgreSQL instruction](https://www.postgresql.org/docs/current/install-windows-full.html) with MSVC 2013.
If zlib support is needed, src/tools/msvc/config.pl must contain the path to the directory with compiled zlib. [Example](https://gist.githubusercontent.com/gsmol/80989f976ce9584824ae3b1bfb00bd87/raw/240032950d4ac4801a79625dd00c8f5d4ed1180c/gistfile1.txt)
```shell
diff --git a/doc/Readme.md b/doc/Readme.md
index 756c6aaa0..0e1d64590 100644
--- a/doc/Readme.md
+++ b/doc/Readme.md
@@ -3,3 +3,6 @@
 xmllint --noout --valid probackup.xml
 xsltproc stylesheet.xsl probackup.xml >pg-probackup.html
 ```
+> [!NOTE]
+>Install ```docbook-xsl``` if you get
+>``` "xsl:import : unable to load http://docbook.sourceforge.net/release/xsl/current/xhtml/docbook.xsl"```
\ No newline at end of file
diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml
index 2cb10e379..10e766239 100644
--- a/doc/pgprobackup.xml
+++ b/doc/pgprobackup.xml
@@ -2,7 +2,6 @@
 doc/src/sgml/pgprobackup.sgml
 &project; documentation
 -->
-
 pg_probackup
@@ -164,7 +163,7 @@ doc/src/sgml/pgprobackup.sgml
   recovery of PostgreSQL database clusters. It is designed to perform
   periodic backups of the PostgreSQL instance that enable you
   to restore the server in case of a failure.
-  pg_probackup supports PostgreSQL 9.5 or higher.
+  pg_probackup supports PostgreSQL 11 or higher.
@@ -172,7 +171,13 @@ doc/src/sgml/pgprobackup.sgml
   Overview
-  Installation and Setup
+  Quick Start
+
+
+  Installation
+
+
+  Setup
   Command-Line Reference
@@ -413,7 +418,7 @@ doc/src/sgml/pgprobackup.sgml
 
-  On Unix systems, for PostgreSQL 10 or lower,
+  On Unix systems, for PostgreSQL 11,
   a backup can be made only by the same OS user that has started the PostgreSQL
   server. For example, if PostgreSQL server is started by user postgres, the backup command must also be run
@@ -450,9 +455,568 @@ doc/src/sgml/pgprobackup.sgml
 
-
-
-  Installation and Setup
+
+  Quick Start
+
+  To quickly get started with pg_probackup, complete the steps below. This will set up FULL and DELTA backups in the remote mode and demonstrate some
+  basic pg_probackup operations. In the following, these terms are used:
+
+  backup — PostgreSQL role used to connect to the PostgreSQL cluster.
+
+  backupdb — database used to connect to the PostgreSQL cluster.
+
+  backup_host — host with the backup catalog.
+
+  backup_user — user on backup_host running all pg_probackup operations.
+
+  /mnt/backups — directory on backup_host where the backup catalog is stored.
+
+  postgres_host — host with the PostgreSQL cluster.
+
+  postgres — user on postgres_host under which PostgreSQL cluster processes are running.
+
+  /var/lib/pgpro/std-16/data — PostgreSQL data directory on postgres_host.
+
+  Steps
+
+  Install pg_probackup on both backup_host and postgres_host.
+
+  Set up an SSH connection from backup_host to postgres_host.
+
+  Configure your database cluster for STREAM backups (a sketch of these preparation steps follows below).
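A minimal sketch of these preparation steps, assuming the quick-start names above; the ssh-copy-id command and the role definition mirror commands shown later in this document, while the pg_hba.conf line and the authentication method are illustrative assumptions:

```shell
# On backup_host: passwordless SSH to the postgres user on postgres_host
ssh-copy-id postgres@postgres_host

# On postgres_host: role and database used for backup connections
psql -U postgres -c "CREATE ROLE backup WITH LOGIN REPLICATION"
psql -U postgres -c "CREATE DATABASE backupdb OWNER backup"

# On postgres_host: allow replication connections for the backup role
# (required for STREAM backups), then reload the configuration
echo "host replication backup samehost scram-sha-256" >> /var/lib/pgpro/std-16/data/pg_hba.conf
psql -U postgres -c "SELECT pg_reload_conf()"
```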
+
+  Initialize the backup catalog:
+
+backup_user@backup_host:~$ pg_probackup-16 init -B /mnt/backups
+INFO: Backup catalog '/mnt/backups' successfully initialized
+
+  Add a backup instance called node to the backup catalog:
+
+backup_user@backup_host:~$ pg_probackup-16 add-instance \
+    -B /mnt/backups \
+    -D /var/lib/pgpro/std-16/data \
+    --instance=node \
+    --remote-host=postgres_host \
+    --remote-user=postgres
+INFO: Instance 'node' successfully initialized
+
+  Make a FULL backup:
+
+backup_user@backup_host:~$ pg_probackup-16 backup \
+    -B /mnt/backups \
+    -b FULL \
+    --instance=node \
+    --stream \
+    --compress-algorithm=zlib \
+    --remote-host=postgres_host \
+    --remote-user=postgres \
+    -U backup \
+    -d backupdb
+INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN1Q, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1
+INFO: This PostgreSQL instance was initialized with data block checksums. Data block corruption will be detected
+INFO: Database backup start
+INFO: wait for pg_backup_start()
+INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN1Q/database/pg_wal/000000010000000000000008 to be streamed
+INFO: PGDATA size: 96MB
+INFO: Current Start LSN: 0/8000028, TLI: 1
+INFO: Start transferring data files
+INFO: Data files are transferred, time elapsed: 1s
+INFO: wait for pg_stop_backup()
+INFO: pg_stop backup() successfully executed
+INFO: stop_lsn: 0/800BBD0
+INFO: Getting the Recovery Time from WAL
+INFO: Syncing backup files to disk
+INFO: Backup files are synced, time elapsed: 1s
+INFO: Validating backup SCUN1Q
+INFO: Backup SCUN1Q data files are valid
+INFO: Backup SCUN1Q resident size: 56MB
+INFO: Backup SCUN1Q completed
+
+  List the backups of the instance:
+
+backup_user@backup_host:~$ pg_probackup-16 show \
+    -B /mnt/backups \
+    --instance=node
+================================================================================================================================
+ Instance  Version  ID      Recovery Time           Mode  WAL Mode  TLI  Time  Data  WAL   Zratio  Start LSN  Stop LSN   Status
+================================================================================================================================
+ node      16       SCUN1Q  2024-05-02 11:17:53+03  FULL  STREAM    1/0  12s   40MB  16MB  2.42    0/8000028  0/800BBD0  OK
+
+  Make an incremental backup in the DELTA mode:
+
+backup_user@backup_host:~$ pg_probackup-16 backup \
+    -B /mnt/backups \
+    -b DELTA \
+    --instance=node \
+    --stream \
+    --compress-algorithm=zlib \
+    --remote-host=postgres_host \
+    --remote-user=postgres \
+    -U backup \
+    -d backupdb
+INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN22, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1
+INFO: This PostgreSQL instance was initialized with data block checksums.
Data block corruption will be detected +INFO: Database backup start +INFO: wait for pg_backup_start() +INFO: Parent backup: SCUN1Q +INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN22/database/pg_wal/000000010000000000000009 to be streamed +INFO: PGDATA size: 96MB +INFO: Current Start LSN: 0/9000028, TLI: 1 +INFO: Parent Start LSN: 0/8000028, TLI: 1 +INFO: Start transferring data files +INFO: Data files are transferred, time elapsed: 1s +INFO: wait for pg_stop_backup() +INFO: pg_stop backup() successfully executed +INFO: stop_lsn: 0/9000168 +INFO: Getting the Recovery Time from WAL +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 1s +INFO: Validating backup SCUN22 +INFO: Backup SCUN22 data files are valid +INFO: Backup SCUN22 resident size: 34MB +INFO: Backup SCUN22 completed + + + + Add or modify some parameters in the pg_probackup + configuration file, so that you do not have to specify them each time on the command line: + +backup_user@backup_host:~$ pg_probackup-16 set-config \ + -B /mnt/backups \ + --instance=node \ + --remote-host=postgres_host \ + --remote-user=postgres \ + -U backup \ + -d backupdb + + + + Check the configuration of the instance: + +backup_user@backup_host:~$ pg_probackup-16 show-config \ + -B /mnt/backups \ + --instance=node +# Backup instance information +pgdata = /var/lib/pgpro/std-16/data +system-identifier = 7364313570668255886 +xlog-seg-size = 16777216 +# Connection parameters +pgdatabase = backupdb +pghost = postgres_host +pguser = backup +# Replica parameters +replica-timeout = 5min +# Archive parameters +archive-timeout = 5min +# Logging parameters +log-level-console = INFO +log-level-file = OFF +log-format-console = PLAIN +log-format-file = PLAIN +log-filename = pg_probackup.log +log-rotation-size = 0TB +log-rotation-age = 0d +# Retention parameters +retention-redundancy = 0 +retention-window = 0 +wal-depth = 0 +# Compression parameters +compress-algorithm = none +compress-level = 1 +# Remote access parameters +remote-proto = ssh +remote-host = postgres_host +remote-user = postgres + + + Note that the parameters not modified via set-config retain their default values. + + + + Make another incremental backup in the DELTA mode, omitting + the parameters stored in the configuration file earlier: + +backup_user@backup_host:~$ pg_probackup-16 backup \ + -B /mnt/backups \ + -b DELTA \ + --instance=node \ + --stream \ + --compress-algorithm=zlib +INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN2C, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected
+INFO: Database backup start
+INFO: wait for pg_backup_start()
+INFO: Parent backup: SCUN22
+INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN2C/database/pg_wal/00000001000000000000000B to be streamed
+INFO: PGDATA size: 96MB
+INFO: Current Start LSN: 0/B000028, TLI: 1
+INFO: Parent Start LSN: 0/9000028, TLI: 1
+INFO: Start transferring data files
+INFO: Data files are transferred, time elapsed: 0
+INFO: wait for pg_stop_backup()
+INFO: pg_stop backup() successfully executed
+INFO: stop_lsn: 0/B000168
+INFO: Getting the Recovery Time from WAL
+INFO: Syncing backup files to disk
+INFO: Backup files are synced, time elapsed: 0
+INFO: Validating backup SCUN2C
+INFO: Backup SCUN2C data files are valid
+INFO: Backup SCUN2C resident size: 17MB
+INFO: Backup SCUN2C completed
+
+  List the backups of the instance again:
+
+backup_user@backup_host:~$ pg_probackup-16 show \
+    -B /mnt/backups \
+    --instance=node
+===================================================================================================================================
+ Instance  Version  ID      Recovery Time           Mode   WAL Mode  TLI  Time  Data    WAL   Zratio  Start LSN  Stop LSN   Status
+===================================================================================================================================
+ node      16       SCUN2C  2024-05-02 11:18:13+03  DELTA  STREAM    1/1  10s   1139kB  16MB  1.00    0/B000028  0/B000168  OK
+ node      16       SCUN22  2024-05-02 11:18:04+03  DELTA  STREAM    1/1  10s   2357kB  32MB  1.02    0/9000028  0/9000168  OK
+ node      16       SCUN1Q  2024-05-02 11:17:53+03  FULL   STREAM    1/0  12s   40MB    16MB  2.42    0/8000028  0/800BBD0  OK
+
+  Restore the data from the latest available backup to an arbitrary location:
+
+backup_user@backup_host:~$ pg_probackup-16 restore \
+    -B /mnt/backups \
+    -D /var/lib/pgpro/std-16/staging-data \
+    --instance=node
+INFO: Validating parents for backup SCUN2C
+INFO: Validating backup SCUN1Q
+INFO: Backup SCUN1Q data files are valid
+INFO: Validating backup SCUN22
+INFO: Backup SCUN22 data files are valid
+INFO: Validating backup SCUN2C
+INFO: Backup SCUN2C data files are valid
+INFO: Backup SCUN2C WAL segments are valid
+INFO: Backup SCUN2C is valid.
+INFO: Restoring the database from backup SCUN2C on localhost
+INFO: Start restoring backup files. PGDATA size: 112MB
+INFO: Backup files are restored. Transfered bytes: 112MB, time elapsed: 0
+INFO: Restore incremental ratio (less is better): 100% (112MB/112MB)
+INFO: Syncing restored files to disk
+INFO: Restored backup files are synced, time elapsed: 2s
+INFO: Restore of backup SCUN2C completed.
+
+  Installation
+
+  Installation on Debian family systems
+
+  You may need to use apt-get instead of apt on older systems in the commands below.
+
+  Add the pg_probackup repository GPG key
+
+sudo apt install gpg wget
+wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | \
+sudo tee /etc/apt/trusted.gpg.d/pg_probackup.asc
+
+  Set up the binary package repository
+
+. /etc/os-release
+echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb $VERSION_CODENAME main-$VERSION_CODENAME" | \
+sudo tee /etc/apt/sources.list.d/pg_probackup.list
+
+  Optionally set up the source package repository for rebuilding the binaries
+
+echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb $VERSION_CODENAME main-$VERSION_CODENAME" | \
+sudo tee -a /etc/apt/sources.list.d/pg_probackup.list
+
+  List the available pg_probackup packages
+
+  Using apt:
+
+sudo apt update
+apt search pg_probackup
+
+  Using apt-get:
+
+sudo apt-get update
+apt-cache search pg_probackup
+
+  Install or upgrade a pg_probackup version of your choice
+
+sudo apt install pg-probackup-16
+
+  Optionally install the debug package
+
+sudo apt install pg-probackup-16-dbg
+
+  Optionally install the source package (provided you have set up the source package repository as described above)
+
+sudo apt install dpkg-dev
+sudo apt source pg-probackup-16
+
+  Installation on Red Hat family systems
+
+  You may need to use yum instead of dnf on older systems in the commands below.
+
+  Install the pg_probackup repository
+
+dnf install https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm
+
+  List the available pg_probackup packages
+
+dnf search pg_probackup
+
+  Install or upgrade a pg_probackup version of your choice
+
+dnf install pg_probackup-16
+
+  Optionally install the debug package
+
+dnf install pg_probackup-16-debuginfo
+
+  Optionally install the source package for rebuilding the binaries
+
+  Using dnf:
+
+dnf install 'dnf-command(download)'
+dnf download --source pg_probackup-16
+
+  Using yum:
+
+yumdownloader --source pg_probackup-16
+
+  Installation on ALT Linux
+
+  Set up the repository
+
+  On ALT Linux 10:
+
+. /etc/os-release
+echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p$VERSION_ID x86_64 vanilla" | \
+sudo tee /etc/apt/sources.list.d/pg_probackup.list
+
+  On ALT Linux 8 and 9:
+
+. /etc/os-release
+echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-$VERSION_ID x86_64 vanilla" | \
+sudo tee /etc/apt/sources.list.d/pg_probackup.list
+
+  List the available pg_probackup packages
+
+sudo apt-get update
+apt-cache search pg_probackup
+
+  Install or upgrade a pg_probackup version of your choice
+
+sudo apt-get install pg_probackup-16
+
+  Optionally install the debug package
+
+sudo apt-get install pg_probackup-16-debuginfo
+
+  Installation on SUSE Linux
+
+  Add the pg_probackup repository GPG key
+
+zypper in -y gpg wget
+wget -O GPG-KEY-PG_PROBACKUP https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP
+rpm --import GPG-KEY-PG_PROBACKUP
+
+  Set up the repository
+
+zypper in https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm
+
+  List the available pg_probackup packages
+
+zypper se pg_probackup
+
+  Install or upgrade a pg_probackup version of your choice
+
+zypper in pg_probackup-16
+
+  Optionally install the source package for rebuilding the binaries
+
+zypper si pg_probackup-16
+
+  Setup
 
   Once you have pg_probackup installed, complete the following setup:
@@ -534,10 +1098,10 @@ pg_probackup init -B backup_dir
 
   To add a new backup instance, run the following command:
 
-pg_probackup add-instance -B backup_dir -D data_dir --instance instance_name [remote_options]
+pg_probackup add-instance -B backup_dir -D data_dir --instance=instance_name [remote_options]
 
-  where:
+  Where:
@@ -569,10 +1133,9 @@ pg_probackup add-instance -B backup_dir -D
 
   backups/instance_name directory contains the
   pg_probackup.conf configuration file that controls
-  pg_probackup settings for this backup instance. If you run this
-  command with the
-  remote_options, the specified
-  parameters will be added to pg_probackup.conf.
+  pg_probackup settings for this backup instance. To add
+  remote_options to the configuration file, use the
+  set-config command.
 
   For details on how to fine-tune pg_probackup configuration, see
@@ -607,53 +1170,21 @@ pg_probackup add-instance -B backup_dir -D
 
-  To perform a backup, the following
-  permissions for role backup are required
-  only in the database used for
-  connection to the PostgreSQL server:
-
-
-  For PostgreSQL 9.5:
+  For security reasons, it is recommended to run the configuration SQL queries below
+  in a separate database.
 
-BEGIN;
-CREATE ROLE backup WITH LOGIN;
-GRANT USAGE ON SCHEMA pg_catalog TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;
-COMMIT;
+postgres=# CREATE DATABASE backupdb;
+postgres=# \c backupdb
 
-  For PostgreSQL 9.6:
+  To perform a backup, the following
+  permissions for role backup are required
+  only in the database used for
+  connection to the PostgreSQL server.
-
-BEGIN;
-CREATE ROLE backup WITH LOGIN;
-GRANT USAGE ON SCHEMA pg_catalog TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup;
-COMMIT;
-
-  For PostgreSQL versions 10 — 14:
+  For PostgreSQL versions 11 — 14:
 
 BEGIN;
@@ -742,7 +1273,18 @@ COMMIT;
 
-  Grant the REPLICATION privilege to the backup role:
+  If the backup role does not exist, create it with
+  the REPLICATION privilege when
+  Configuring the
+  Database Cluster:
+
+CREATE ROLE backup WITH LOGIN REPLICATION;
+
+
+  If the backup role already exists, grant it the REPLICATION privilege:
 
 ALTER ROLE backup WITH REPLICATION;
@@ -831,7 +1373,7 @@ ALTER ROLE backup WITH REPLICATION;
   parameter, as follows:
 
-archive_command = '"install_dir/pg_probackup" archive-push -B "backup_dir" --instance instance_name --wal-file-name=%f [remote_options]'
+archive_command = '"install_dir/pg_probackup" archive-push -B "backup_dir" --instance=instance_name --wal-file-name=%f [remote_options]'
@@ -901,7 +1443,7 @@ archive_command = '"install_dir/pg_probackup" archive
 
   Backup from Standby
 
-  For PostgreSQL 9.6 or higher, pg_probackup can take backups from
+  pg_probackup can take backups from
   a standby server. This requires the following additional setup:
@@ -1024,12 +1566,12 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup;
 
   Configuring the Remote Mode
 
-  pg_probackup supports the remote mode that allows to perform
-  backup, restore and WAL archiving operations remotely. In this
-  mode, the backup catalog is stored on a local system, while
-  PostgreSQL instance to backup and/or to restore is located on a
-  remote system. Currently the only supported remote protocol is
-  SSH.
+  pg_probackup supports the remote mode that
+  allows you to perform backup, restore and WAL archiving operations remotely.
+  In this mode, the backup catalog is stored on a local system, while
+  PostgreSQL instance to backup and/or to restore
+  is located on a remote system. Currently the only supported remote
+  protocol is SSH.
 
   Set up SSH
 
   If you are going to use pg_probackup in remote mode via SSH, complete the following steps:
 
-
+
   Install pg_probackup on both systems: backup_host and
-  db_host.
+  postgres_host.
-
-  For communication between the hosts set up the passwordless
-  SSH connection between backup user on
-  backup_host and
+  For communication between the hosts set up a passwordless
+  SSH connection between the backup_user user on
+  backup_host and the
   postgres user on
-  db_host:
+  postgres_host:
 
-[backup@backup_host] ssh-copy-id postgres@db_host
+[backup_user@backup_host] ssh-copy-id postgres@postgres_host
+
+  Where:
+
+  backup_host is the system with the
+  backup catalog.
+
+  postgres_host is the system with the PostgreSQL
+  cluster.
+
+  backup_user is the OS user on
+  backup_host used to run pg_probackup.
+
+  postgres is the user on
+  postgres_host under which
+  PostgreSQL cluster processes are running.
+  For PostgreSQL 11 or higher, a
+  more secure approach can be used thanks to the
+  allow-group-access feature.
+
   If you are going to rely on continuous
-  WAL archiving, set up passwordless SSH
-  connection between postgres user on
-  db_host and backup
+  WAL archiving, set up a passwordless SSH
+  connection between the postgres user on
+  postgres_host and the backup
   user on backup_host:
 
-[postgres@db_host] ssh-copy-id backup@backup_host
+[postgres@postgres_host] ssh-copy-id backup_user@backup_host
 
-
-
-  where:
-
-
-  backup_host is the system with
-  backup catalog.
-
-
-  db_host is the system with PostgreSQL
-  cluster.
-
-
-  backup is the OS user on
-  backup_host used to run pg_probackup.
-
-
-  postgres is the OS user on
-  db_host used to start the PostgreSQL
-  cluster. For PostgreSQL 11 or higher a
-  more secure approach can be used thanks to
-  allow-group-access
-  feature.
-
-
+
+  Make sure pg_probackup on postgres_host
+  can be located when a connection via SSH is made. For example, for Bash, you can
+  modify PATH in ~/.bashrc of the postgres user
+  (above the line in bashrc that exits the script for non-interactive shells).
+  Alternatively, for pg_probackup commands, specify the path to the directory
+  containing the pg_probackup binary on postgres_host via
+  the --remote-path option.
 
   pg_probackup in the remote mode via SSH works as follows:
@@ -1153,10 +1706,10 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup;
 
   The main process is usually started on backup_host and connects to
-  db_host, but in case of
+  postgres_host, but in case of
   archive-push and archive-get commands the main process
-  is started on db_host and connects to
+  is started on postgres_host and connects to
   backup_host.
@@ -1177,7 +1730,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup;
 
   Compression is always done on
-  db_host, while decompression is always done on
+  postgres_host, while decompression is always done on
   backup_host.
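With both SSH directions configured, the remote flow described above maps directly onto the quick-start commands. A minimal sketch of a remote FULL backup, reusing the names from the quick start (/mnt/backups, instance node, hosts backup_host and postgres_host):

```shell
# Run on backup_host: the main process connects to postgres_host over SSH,
# starts remote agents there, and writes the backup into the local catalog.
pg_probackup-16 backup \
    -B /mnt/backups \
    -b FULL \
    --instance=node \
    --stream \
    --remote-host=postgres_host \
    --remote-user=postgres \
    -U backup \
    -d backupdb
```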
@@ -1269,7 +1822,7 @@ CREATE EXTENSION ptrack; To create a backup, run the following command: -pg_probackup backup -B backup_dir --instance instance_name -b backup_mode +pg_probackup backup -B backup_dir --instance=instance_name -b backup_mode Where backup_mode can take one of the @@ -1295,7 +1848,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL +pg_probackup backup -B backup_dir --instance=instance_name -b FULL ARCHIVE backups rely on @@ -1325,7 +1878,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --stream --temp-slot +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --stream --temp-slot The optional flag ensures that @@ -1418,7 +1971,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --external-dirs=/etc/dir1:/etc/dir2 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --external-dirs=/etc/dir1:/etc/dir2 Similarly, to include C:\dir1 and @@ -1426,7 +1979,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --external-dirs=C:\dir1;C:\dir2 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --external-dirs=C:\dir1;C:\dir2 pg_probackup recursively copies the contents @@ -1454,7 +2007,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup checkdb [-B backup_dir [--instance instance_name]] [-D data_dir] [connection_options] +pg_probackup checkdb [-B backup_dir [--instance=instance_name]] [-D data_dir] [connection_options] @@ -1552,7 +2105,7 @@ pg_probackup checkdb --amcheck --skip-block-validation [connection_ this command: -pg_probackup validate -B backup_dir --instance instance_name --recovery-target-xid=4242 +pg_probackup validate -B backup_dir --instance=instance_name --recovery-target-xid=4242 If validation completes successfully, pg_probackup displays the @@ -1572,11 +2125,11 @@ pg_probackup validate -B backup_dir --instance For example, to check that you can restore the database cluster - from a backup copy with the PT8XFX backup ID up to the + from a backup copy with the SCUN2C backup ID up to the specified timestamp, run this command: - -pg_probackup validate -B backup_dir --instance instance_name -i PT8XFX --recovery-target-time="2017-05-18 14:18:11+03" + +pg_probackup validate -B backup_dir --instance=instance_name -i SCUN2C --recovery-target-time="2024-05-03 11:18:13+03" If you specify the backup_id of an incremental backup, @@ -1594,10 +2147,10 @@ pg_probackup validate -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name -i backup_id +pg_probackup restore -B backup_dir --instance=instance_name -i backup_id - where: + Where: @@ -1641,7 +2194,7 @@ pg_probackup restore -B backup_dir --instance primary_conninfo parameter; you have to add the password manually or use the --primary-conninfo option, if required. - For PostgreSQL 11 or lower, + For PostgreSQL 11, recovery settings are written into the recovery.conf file. 
  Starting from PostgreSQL 12, pg_probackup writes these settings into
  the postgresql.auto.conf file.
@@ -1678,7 +2231,7 @@ pg_probackup restore -B backup_dir --instance
 
-pg_probackup restore -B backup_dir --instance instance_name -D data_dir -j 4 -i backup_id -T tablespace1_dir=tablespace1_newdir -T tablespace2_dir=tablespace2_newdir
+pg_probackup restore -B backup_dir --instance=instance_name -D data_dir -j 4 -i backup_id -T tablespace1_dir=tablespace1_newdir -T tablespace2_dir=tablespace2_newdir
@@ -1710,7 +2263,7 @@ pg_probackup restore -B backup_dir --instance
   command with the following options:
 
-pg_probackup restore -B backup_dir --instance instance_name -D data_dir -I incremental_mode
+pg_probackup restore -B backup_dir --instance=instance_name -D data_dir -I incremental_mode
 
   Where incremental_mode can take one of the
@@ -1729,7 +2282,7 @@ pg_probackup restore -B backup_dir --instance
 
   LSN — read the pg_control in the
-  data directory to obtain redo LSN and redo TLI, which allows
+  data directory to obtain redo LSN and redo TLI, which allows you
+  to determine a point in history (the shift point) where the state of the data
+  directory diverged from the history of the target backup chain. If the shift
+  point is not within reach of the backup chain history, the restore is aborted.
@@ -1770,29 +2323,36 @@ pg_probackup restore -B backup_dir --instance
 
-
-=============================================================================================================================================
- Instance  Version  ID      Recovery Time           Mode   WAL Mode  TLI    Time    Data    WAL    Zratio  Start LSN    Stop LSN     Status
-=============================================================================================================================================
- node      12       QBRNBP  2020-06-11 17:40:58+03  DELTA  ARCHIVE   16/15  40s     194MB   16MB   8.26    15/2C000028  15/2D000128  OK
- node      12       QBRIDX  2020-06-11 15:51:42+03  PAGE   ARCHIVE   15/15  11s     18MB    16MB   5.10    14/DC000028  14/DD0000B8  OK
- node      12       QBRIAJ  2020-06-11 15:51:08+03  PAGE   ARCHIVE   15/15  20s     141MB   96MB   6.22    14/D4BABFE0  14/DA9871D0  OK
- node      12       QBRHT8  2020-06-11 15:45:56+03  FULL   ARCHIVE   15/0   2m:11s  1371MB  416MB  10.93   14/9D000028  14/B782E9A0  OK
-
-pg_probackup restore -B /backup --instance node -R -I lsn
-INFO: Running incremental restore into nonempty directory: "/var/lib/pgsql/12/data"
-INFO: Destination directory redo point 15/2E000028 on tli 16 is within reach of backup QBRIDX with Stop LSN 14/DD0000B8 on tli 15
-INFO: shift LSN: 14/DD0000B8
-INFO: Restoring the database from backup at 2020-06-11 17:40:58+03
-INFO: Extracting the content of destination directory for incremental restore
-INFO: Destination directory content extracted, time elapsed: 1s
-INFO: Removing redundant files in destination directory
-INFO: Redundant files are removed, time elapsed: 1s
-INFO: Start restoring backup files. PGDATA size: 15GB
-INFO: Backup files are restored. Transfered bytes: 1693MB, time elapsed: 43s
-INFO: Restore incremental ratio (less is better): 11% (1693MB/15GB)
-INFO: Restore of backup QBRNBP completed.
- + +====================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +====================================================================================================================================== + node 16 SCUN3Y 2024-05-02 11:19:16+03 DELTA STREAM 16/15 7s 92MB 208MB 2.27 0/3C0043A8 0/46159C70 OK + node 16 SCUN3M 2024-05-02 11:19:01+03 PTRACK STREAM 15/15 10s 30MB 16MB 2.23 0/32000028 0/32005ED0 OK + node 16 SCUN39 2024-05-02 11:18:50+03 PAGE STREAM 15/15 12s 46MB 32MB 1.44 0/2A000028 0/2B0000B8 OK + node 16 SCUN2V 2024-05-02 11:18:38+03 FULL STREAM 15/0 11s 154MB 16MB 2.32 0/23000028 0/23000168 OK + +backup_user@backup_host:~$ pg_probackup-16 restore -B /mnt/backups --instance=node -R -I lsn +INFO: Destination directory and tablespace directories are empty, disable incremental restore +INFO: Validating parents for backup SCUN3Y +INFO: Validating backup SCUN2V +INFO: Backup SCUN2V data files are valid +INFO: Validating backup SCUN39 +INFO: Backup SCUN39 data files are valid +INFO: Validating backup SCUN3M +INFO: Backup SCUN3M data files are valid +INFO: Validating backup SCUN3Y +INFO: Backup SCUN3Y data files are valid +INFO: Backup SCUN3Y WAL segments are valid +INFO: Backup SCUN3Y is valid. +INFO: Restoring the database from backup SCUN3Y +INFO: Start restoring backup files. PGDATA size: 759MB +INFO: Backup files are restored. Transfered bytes: 759MB, time elapsed: 3s +INFO: Restore incremental ratio (less is better): 100% (759MB/759MB) +INFO: Syncing restored files to disk +INFO: Restored backup files are synced, time elapsed: 1s +INFO: Restore of backup SCUN3Y completed. + Incremental restore is possible only for backups with @@ -1816,7 +2376,7 @@ INFO: Restore of backup QBRNBP completed. 
with the following options: -pg_probackup restore -B backup_dir --instance instance_name --db-include=database_name +pg_probackup restore -B backup_dir --instance=instance_name --db-include=database_name The option can be specified @@ -1825,14 +2385,14 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --db-include=db1 --db-include=db2 +pg_probackup restore -B backup_dir --instance=instance_name --db-include=db1 --db-include=db2 To exclude one or more databases from restore, use the option: -pg_probackup restore -B backup_dir --instance instance_name --db-exclude=database_name +pg_probackup restore -B backup_dir --instance=instance_name --db-exclude=database_name The option can be specified @@ -1841,7 +2401,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --db-exclude=db1 --db-exclude=db2 +pg_probackup restore -B backup_dir --instance=instance_name --db-exclude=db1 --db-exclude=db2 Partial restore relies on lax behavior of PostgreSQL recovery @@ -1903,7 +2463,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-time="2017-05-18 14:18:11+03" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-time="2024-05-03 11:18:13+03" @@ -1912,7 +2472,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-xid option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-xid=687 +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-xid=687 @@ -1921,7 +2481,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-lsn option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-lsn=16/B374D848 +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-lsn=16/B374D848 @@ -1930,7 +2490,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-name option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-name="before_app_upgrade" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-name="before_app_upgrade" @@ -1940,7 +2500,7 @@ pg_probackup restore -B backup_dir --instance latest value: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target="latest" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target="latest" @@ -1950,7 +2510,7 @@ pg_probackup restore -B backup_dir --instance immediate value: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target='immediate' +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target='immediate' @@ -1958,7 +2518,7 @@ pg_probackup restore -B backup_dir --instance Codestin Search App - pg_probackup supports the remote mode that allows to perform + pg_probackup supports the remote mode that allows you to perform backup and restore operations remotely via SSH. In this mode, the backup catalog is stored on a local system, while PostgreSQL instance to be backed @@ -1971,6 +2531,15 @@ pg_probackup restore -B backup_dir --instance + + + In addition to SSH connection, pg_probackup uses + a regular connection to the database to manage the remote operation. + See the section Configuring + the Database Cluster for details of how to set up + a database connection. 
+ + The typical workflow is as follows: @@ -1979,8 +2548,7 @@ pg_probackup restore -B backup_dir --instance On your backup host, configure pg_probackup as explained in the section - Installation and - Setup. For the + Setup. For the and commands, make sure to specify remote @@ -2026,7 +2594,7 @@ pg_probackup restore -B backup_dir --instance 2302, run: -pg_probackup backup -B backup_dir --instance instance_name -b FULL --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 To restore the latest available backup on a remote system with host address @@ -2034,7 +2602,7 @@ pg_probackup backup -B backup_dir --instance 2302, run: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 Restoring an ARCHIVE backup or performing PITR in the remote mode @@ -2061,20 +2629,20 @@ pg_probackup restore -B backup_dir --instance 2303, run: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --archive-host=192.168.0.3 --archive-port=2303 --archive-user=backup +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --archive-host=192.168.0.3 --archive-port=2303 --archive-user=backup Provided arguments will be used to construct the restore_command: -restore_command = '"install_dir/pg_probackup" archive-get -B "backup_dir" --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' +restore_command = '"install_dir/pg_probackup" archive-get -B "backup_dir" --instance=instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' Alternatively, you can use the option to provide the entire restore_command: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --restore-command='"install_dir/pg_probackup" archive-get -B "backup_dir" --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --restore-command='"install_dir/pg_probackup" archive-get -B "backup_dir" --instance=instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' @@ -2103,7 +2671,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL -j 4 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL -j 4 @@ -2164,14 +2732,14 @@ pg_probackup backup -B backup_dir --instance set-config command: -pg_probackup set-config -B backup_dir --instance instance_name +pg_probackup set-config -B backup_dir --instance=instance_name [--external-dirs=external_directory_path] [remote_options] [connection_options] [retention_options] [logging_options] To view the current settings, run the following command: -pg_probackup show-config -B backup_dir --instance instance_name +pg_probackup show-config -B backup_dir --instance=instance_name You can override 
the settings defined in pg_probackup.conf when @@ -2245,16 +2813,16 @@ pg_probackup show -B backup_dir pg_probackup displays the list of all the available backups. For example: - + BACKUP INSTANCE 'node' ====================================================================================================================================== - Instance Version ID Recovery time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status ====================================================================================================================================== - node 10 PYSUE8 2019-10-03 15:51:48+03 FULL ARCHIVE 1/0 16s 9047kB 16MB 4.31 0/12000028 0/12000160 OK - node 10 P7XDQV 2018-04-29 05:32:59+03 DELTA STREAM 1/1 11s 19MB 16MB 1.00 0/15000060 0/15000198 OK - node 10 P7XDJA 2018-04-29 05:28:36+03 PTRACK STREAM 1/1 21s 32MB 32MB 1.00 0/13000028 0/13000198 OK - node 10 P7XDHU 2018-04-29 05:27:59+03 PAGE STREAM 1/1 15s 33MB 16MB 1.00 0/11000028 0/110001D0 OK - node 10 P7XDHB 2018-04-29 05:27:15+03 FULL STREAM 1/0 11s 39MB 16MB 1.00 0/F000028 0/F000198 OK + node 16 SCUN4E 2024-05-02 11:19:37+03 FULL ARCHIVE 1/0 13s 239MB 16MB 2.31 0/4C000028 0/4D0000B8 OK + node 16 SCUN3Y 2024-05-02 11:19:16+03 DELTA STREAM 1/1 7s 92MB 208MB 2.27 0/3C0043A8 0/46159C70 OK + node 16 SCUN3M 2024-05-02 11:19:01+03 PTRACK STREAM 1/1 10s 30MB 16MB 2.23 0/32000028 0/32005ED0 OK + node 16 SCUN39 2024-05-02 11:18:50+03 PAGE STREAM 1/1 12s 46MB 32MB 1.44 0/2A000028 0/2B0000B8 OK + node 16 SCUN2V 2024-05-02 11:18:38+03 FULL STREAM 1/0 11s 154MB 16MB 2.32 0/23000028 0/23000168 OK For each backup, the following information is provided: @@ -2404,12 +2972,12 @@ BACKUP INSTANCE 'node' show command with the backup ID: -pg_probackup show -B backup_dir --instance instance_name -i backup_id +pg_probackup show -B backup_dir --instance=instance_name -i backup_id The sample output is as follows: - + #Configuration backup-mode = FULL stream = false @@ -2419,27 +2987,26 @@ from-replica = false #Compatibility block-size = 8192 -wal-block-size = 8192 +xlog-block-size = 8192 checksum-version = 1 -program-version = 2.1.3 -server-version = 10 +program-version = 2.5.15 +server-version = 16 #Result backup info timelineid = 1 -start-lsn = 0/04000028 -stop-lsn = 0/040000f8 -start-time = '2017-05-16 12:57:29' -end-time = '2017-05-16 12:57:31' -recovery-xid = 597 -recovery-time = '2017-05-16 12:57:31' -expire-time = '2020-05-16 12:57:31' -data-bytes = 22288792 +start-lsn = 0/4C000028 +stop-lsn = 0/4D0000B8 +start-time = '2024-05-02 11:19:26+03' +end-time = '2024-05-02 11:19:39+03' +recovery-xid = 743 +recovery-time = '2024-05-02 11:19:37+03' +data-bytes = 250827955 wal-bytes = 16777216 -uncompressed-bytes = 39961833 -pgdata-bytes = 39859393 +uncompressed-bytes = 578216425 +pgdata-bytes = 578216107 status = OK -parent-backup-id = 'PT8XFX' -primary_conninfo = 'user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any' +primary_conninfo = 'user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable' +content-crc = 802820606 Detailed output has additional attributes: @@ -2548,44 +3115,46 @@ primary_conninfo = 'user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmod in the JSON format: 
-pg_probackup show -B backup_dir --instance instance_name --format=json -i backup_id +pg_probackup show -B backup_dir --instance=instance_name --format=json -i backup_id The sample output is as follows: - + [ - { - "instance": "node", - "backups": [ - { - "id": "PT91HZ", - "parent-backup-id": "PT8XFX", - "backup-mode": "DELTA", - "wal": "ARCHIVE", - "compress-alg": "zlib", - "compress-level": 1, - "from-replica": false, - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.3", - "server-version": "10", - "current-tli": 16, - "parent-tli": 2, - "start-lsn": "0/8000028", - "stop-lsn": "0/8000160", - "start-time": "2019-06-17 18:25:11+03", - "end-time": "2019-06-17 18:25:16+03", - "recovery-xid": 0, - "recovery-time": "2019-06-17 18:25:15+03", - "data-bytes": 106733, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } + { + "instance": "node", + "backups": [ + { + "id": "SCUN4E", + "backup-mode": "FULL", + "wal": "ARCHIVE", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 16, + "parent-tli": 2, + "start-lsn": "0/4C000028", + "stop-lsn": "0/4D0000B8", + "start-time": "2024-05-02 11:19:26+03", + "end-time": "2024-05-02 11:19:39+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:37+03", + "data-bytes": 250827955, + "wal-bytes": 16777216, + "uncompressed-bytes": 578216425, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 802820606 + } + ] + } ] @@ -2596,22 +3165,19 @@ pg_probackup show -B backup_dir --instance -pg_probackup show -B backup_dir [--instance instance_name] --archive +pg_probackup show -B backup_dir [--instance=instance_name] --archive pg_probackup displays the list of all the available WAL files grouped by timelines. 
For example: - + + ARCHIVE INSTANCE 'node' -=================================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -=================================================================================================================================== - 5 1 0/B000000 00000005000000000000000B 00000005000000000000000C 2 685kB 48.00 0 OK - 4 3 0/18000000 000000040000000000000018 00000004000000000000001A 3 648kB 77.00 0 OK - 3 2 0/15000000 000000030000000000000015 000000030000000000000017 3 648kB 77.00 0 OK - 2 1 0/B000108 00000002000000000000000B 000000020000000000000015 5 892kB 94.00 1 DEGRADED - 1 0 0/0 000000010000000000000001 00000001000000000000000A 10 8774kB 19.00 1 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000019 00000001000000000000004D 53 848MB 1.00 5 OK For each timeline, the following information is provided: @@ -2695,219 +3261,176 @@ ARCHIVE INSTANCE 'node' format, run the command: -pg_probackup show -B backup_dir [--instance instance_name] --archive --format=json +pg_probackup show -B backup_dir [--instance=instance_name] --archive --format=json The sample output is as follows: - + [ - { - "instance": "replica", - "timelines": [ - { - "tli": 5, - "parent-tli": 1, - "switchpoint": "0/B000000", - "min-segno": "00000005000000000000000B", - "max-segno": "00000005000000000000000C", - "n-segments": 2, - "size": 685320, - "zratio": 48.00, - "closest-backup-id": "PXS92O", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 4, - "parent-tli": 3, - "switchpoint": "0/18000000", - "min-segno": "000000040000000000000018", - "max-segno": "00000004000000000000001A", - "n-segments": 3, - "size": 648625, - "zratio": 77.00, - "closest-backup-id": "PXS9CE", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 3, - "parent-tli": 2, - "switchpoint": "0/15000000", - "min-segno": "000000030000000000000015", - "max-segno": "000000030000000000000017", - "n-segments": 3, - "size": 648911, - "zratio": 77.00, - "closest-backup-id": "PXS9CE", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 2, - "parent-tli": 1, - "switchpoint": "0/B000108", - "min-segno": "00000002000000000000000B", - "max-segno": "000000020000000000000015", - "n-segments": 5, - "size": 892173, - "zratio": 94.00, - "closest-backup-id": "PXS92O", - "status": "DEGRADED", - "lost-segments": [ - { - "begin-segno": "00000002000000000000000D", - "end-segno": "00000002000000000000000E" - }, - { - "begin-segno": "000000020000000000000010", - "end-segno": "000000020000000000000012" - } - ], - "backups": [ - { - "id": "PXS9CE", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 2, - "parent-tli": 0, - "start-lsn": "0/C000028", - "stop-lsn": "0/C000160", - "start-time": "2019-09-13 21:43:26+03", - "end-time": "2019-09-13 21:43:30+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 
21:43:29+03", - "data-bytes": 104674852, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - }, - { - "tli": 1, - "parent-tli": 0, - "switchpoint": "0/0", - "min-segno": "000000010000000000000001", - "max-segno": "00000001000000000000000A", - "n-segments": 10, - "size": 8774805, - "zratio": 19.00, - "closest-backup-id": "", - "status": "OK", - "lost-segments": [], - "backups": [ - { - "id": "PXS92O", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "true", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 0, - "start-lsn": "0/4000028", - "stop-lsn": "0/6000028", - "start-time": "2019-09-13 21:37:36+03", - "end-time": "2019-09-13 21:38:45+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:30+03", - "data-bytes": 25987319, - "wal-bytes": 50331648, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } - ] - }, - { - "instance": "master", - "timelines": [ - { - "tli": 1, - "parent-tli": 0, - "switchpoint": "0/0", - "min-segno": "000000010000000000000001", - "max-segno": "00000001000000000000000B", - "n-segments": 11, - "size": 8860892, - "zratio": 20.00, - "status": "OK", - "lost-segments": [], - "backups": [ - { - "id": "PXS92H", - "parent-backup-id": "PXS92C", - "backup-mode": "PAGE", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 1, - "start-lsn": "0/4000028", - "stop-lsn": "0/50000B8", - "start-time": "2019-09-13 21:37:29+03", - "end-time": "2019-09-13 21:37:31+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:30+03", - "data-bytes": 1328461, - "wal-bytes": 33554432, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - }, - { - "id": "PXS92C", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 0, - "start-lsn": "0/2000028", - "stop-lsn": "0/2000160", - "start-time": "2019-09-13 21:37:24+03", - "end-time": "2019-09-13 21:37:29+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:28+03", - "data-bytes": 24871902, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } - ] - } + { + "instance": "node", + "timelines": [ + { + "tli": 1, + "parent-tli": 0, + "switchpoint": "0/0", + "min-segno": "000000010000000000000019", + "max-segno": "00000001000000000000004D", + "n-segments": 53, + "size": 889192448, + "zratio": 1.00, + "closest-backup-id": "", + "status": "OK", + "lost-segments": [], + "backups": [ + { + "id": "SCUN4E", + "backup-mode": "FULL", + "wal": "ARCHIVE", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, 
+ "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 0, + "start-lsn": "0/4C000028", + "stop-lsn": "0/4D0000B8", + "start-time": "2024-05-02 11:19:26+03", + "end-time": "2024-05-02 11:19:39+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:37+03", + "data-bytes": 250827955, + "wal-bytes": 16777216, + "uncompressed-bytes": 578216425, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 802820606 + }, + { + "id": "SCUN3Y", + "parent-backup-id": "SCUN3M", + "backup-mode": "DELTA", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/3C0043A8", + "stop-lsn": "0/46159C70", + "start-time": "2024-05-02 11:19:10+03", + "end-time": "2024-05-02 11:19:17+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:16+03", + "data-bytes": 96029293, + "wal-bytes": 218103808, + "uncompressed-bytes": 217639806, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3074300814 + }, + { + "id": "SCUN3M", + "parent-backup-id": "SCUN39", + "backup-mode": "PTRACK", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/32000028", + "stop-lsn": "0/32005ED0", + "start-time": "2024-05-02 11:18:58+03", + "end-time": "2024-05-02 11:19:08+03", + "recovery-xid": 742, + "recovery-time": "2024-05-02 11:19:01+03", + "data-bytes": 31205704, + "wal-bytes": 16777216, + "uncompressed-bytes": 69585790, + "pgdata-bytes": 509927595, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3446949708 + }, + { + "id": "SCUN39", + "parent-backup-id": "SCUN2V", + "backup-mode": "PAGE", + "wal": "STREAM", + "compress-alg": "pglz", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/2A000028", + "stop-lsn": "0/2B0000B8", + "start-time": "2024-05-02 11:18:45+03", + "end-time": "2024-05-02 11:18:57+03", + "recovery-xid": 741, + "recovery-time": "2024-05-02 11:18:50+03", + "data-bytes": 48381612, + "wal-bytes": 33554432, + "uncompressed-bytes": 69569406, + "pgdata-bytes": 441639083, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 
sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3492989773 + }, + { + "id": "SCUN2V", + "backup-mode": "FULL", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 0, + "start-lsn": "0/23000028", + "stop-lsn": "0/23000168", + "start-time": "2024-05-02 11:18:31+03", + "end-time": "2024-05-02 11:18:42+03", + "recovery-xid": 740, + "recovery-time": "2024-05-02 11:18:38+03", + "data-bytes": 161084290, + "wal-bytes": 16777216, + "uncompressed-bytes": 373359081, + "pgdata-bytes": 373358763, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 1621343133 + } + ] + } + ] + } ] @@ -2997,7 +3520,7 @@ pg_probackup show -B backup_dir [--instance -pg_probackup set-config -B backup_dir --instance instance_name --retention-redundancy=2 --retention-window=7 +pg_probackup set-config -B backup_dir --instance=instance_name --retention-redundancy=2 --retention-window=7 @@ -3015,7 +3538,7 @@ pg_probackup set-config -B backup_dir --instance --delete-expired flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired If you would like to also remove the WAL files that are no @@ -3023,7 +3546,7 @@ pg_probackup delete -B backup_dir --instance --delete-wal flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --delete-wal +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --delete-wal @@ -3034,7 +3557,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --retention-window=7 --retention-redundancy=2 +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --retention-window=7 --retention-redundancy=2 Since incremental backups require that their parent full @@ -3054,48 +3577,48 @@ pg_probackup delete -B backup_dir --instance backup_dir directory, with the option set to 7, and you have the following backups - available on April 10, 2019: + available on May 02, 2024: - + BACKUP INSTANCE 'node' -=================================================================================================================================== - Instance Version ID Recovery time Mode WAL TLI Time Data WAL Zratio Start LSN Stop LSN Status -=================================================================================================================================== - node 10 P7XDHR 2019-04-10 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/18000059 0/18000197 OK - node 10 P7XDQV 2019-04-08 05:32:59+03 PAGE STREAM 1/0 11s 19MB 16MB 1.0 0/15000060 0/15000198 OK - node 10 P7XDJA 2019-04-03 05:28:36+03 DELTA STREAM 1/0 21s 32MB 16MB 1.0 0/13000028 0/13000198 OK - -------------------------------------------------------retention window-------------------------------------------------------- - node 10 P7XDHU 2019-04-02 05:27:59+03 PAGE STREAM 1/0 
31s 33MB 16MB 1.0 0/11000028 0/110001D0 OK - node 10 P7XDHB 2019-04-01 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/F000028 0/F000198 OK - node 10 P7XDFT 2019-03-29 05:26:25+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/D000028 0/D000198 OK +===================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +===================================================================================================================================== + node 16 SCUN6L 2024-05-02 11:20:48+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/46000028 0/470000B8 OK + node 16 SCQXUI 2024-04-30 11:20:45+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/44000028 0/450000F0 OK + node 16 SCFTUG 2024-04-24 11:20:43+03 DELTA ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/42000028 0/430000B8 OK +----------------------------------------------------------retention window----------------------------------------------------------- + node 16 SCDZ6D 2024-04-23 11:20:40+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/40000028 0/410000B8 OK + node 16 SCC4HX 2024-04-22 11:20:24+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/3E000028 0/3F0000F0 OK + node 16 SC8F5G 2024-04-20 11:20:07+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/3C0000D8 0/3D00BB58 OK - Even though P7XDHB and P7XDHU backups are outside the + Even though SCC4HX and SCDZ6D backups are outside the retention window, they cannot be removed as it invalidates the - succeeding incremental backups P7XDJA and P7XDQV that are + succeeding incremental backups SCFTUG and SCQXUI that are still required, so, if you run the command with the - flag, only the P7XDFT full + flag, only the SC8F5G full backup will be removed. 
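
 To understand why an expired backup is still pinned, you can inspect the
 ancestry of the incremental backups inside the window: the show
 command run with -i displays the parent-backup-id
 attribute that links each incremental backup to its parent. A minimal
 sketch using an ID from the example above (the exact attribute listing
 you get depends on your catalog):

pg_probackup show -B backup_dir --instance=node -i SCQXUI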
- With the option, the P7XDJA - backup is merged with the underlying P7XDHU and P7XDHB backups + With the option, the SCFTUG + backup is merged with the underlying SCDZ6D and SCC4HX backups and becomes a full one, so there is no need to keep these expired backups anymore: -pg_probackup delete -B backup_dir --instance node --delete-expired --merge-expired +pg_probackup delete -B backup_dir --instance=node --delete-expired --merge-expired pg_probackup show -B backup_dir - + BACKUP INSTANCE 'node' -================================================================================================================================== - Instance Version ID Recovery time Mode WAL TLI Time Data WAL Zratio Start LSN Stop LSN Status -================================================================================================================================== - node 10 P7XDHR 2019-04-10 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/18000059 0/18000197 OK - node 10 P7XDQV 2019-04-08 05:32:59+03 PAGE STREAM 1/0 11s 19MB 16MB 1.0 0/15000060 0/15000198 OK - node 10 P7XDJA 2019-04-03 05:28:36+03 FULL STREAM 1/0 21s 32MB 16MB 1.0 0/13000028 0/13000198 OK +===================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +===================================================================================================================================== + node 16 SCUN6L 2024-05-02 11:20:48+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/46000028 0/470000B8 OK + node 16 SCQXUI 2024-04-30 11:20:45+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/44000028 0/450000F0 OK + node 16 SCFTUG 2024-04-24 11:20:43+03 FULL ARCHIVE 1/1 5s 296MB 16MB 1.00 0/42000028 0/430000B8 OK The Time field for the merged backup displays the time @@ -3111,7 +3634,7 @@ BACKUP INSTANCE 'node' for arbitrary time. For example: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --ttl=30d +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --ttl=30d This command sets the expiration time of the @@ -3123,7 +3646,7 @@ pg_probackup set-backup -B backup_dir --instance --expire-time option. For example: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --expire-time="2020-01-01 00:00:00+03" +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --expire-time="2027-05-02 11:21:00+00" Alternatively, you can use the and @@ -3132,23 +3655,23 @@ pg_probackup set-backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --ttl=30d -pg_probackup backup -B backup_dir --instance instance_name -b FULL --expire-time="2020-01-01 00:00:00+03" +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --ttl=30d +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --expire-time="2027-05-02 11:21:00+00" To check if the backup is pinned, run the command: -pg_probackup show -B backup_dir --instance instance_name -i backup_id +pg_probackup show -B backup_dir --instance=instance_name -i backup_id If the backup is pinned, it has the expire-time attribute that displays its expiration time: - + ... -recovery-time = '2017-05-16 12:57:31' -expire-time = '2020-01-01 00:00:00+03' +recovery-time = '2024-05-02 11:21:00+00' +expire-time = '2027-05-02 11:21:00+00' data-bytes = 22288792 ... 
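
 Since the expire-time attribute is present only while a
 backup is pinned, its presence alone can serve as a scripted check. A
 minimal sketch, assuming the plain-text attribute listing shown above:

pg_probackup show -B backup_dir --instance=instance_name -i backup_id | grep expire-time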
@@ -3157,7 +3680,7 @@ data-bytes = 22288792 You can unpin the backup by setting the option to zero: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --ttl=0 +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --ttl=0 @@ -3221,19 +3744,18 @@ pg_probackup set-backup -B backup_dir --instance : -pg_probackup show -B backup_dir --instance node +pg_probackup show -B backup_dir --instance=node - -BACKUP INSTANCE 'node' -==================================================================================================================================== - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -==================================================================================================================================== - node 11 PZ9442 2019-10-12 10:43:21+03 DELTA STREAM 1/0 10s 121kB 16MB 1.00 0/46000028 0/46000160 OK - node 11 PZ943L 2019-10-12 10:43:04+03 FULL STREAM 1/0 10s 180MB 32MB 1.00 0/44000028 0/44000160 OK - node 11 PZ7YR5 2019-10-11 19:49:56+03 DELTA STREAM 1/1 10s 112kB 32MB 1.00 0/41000028 0/41000160 OK - node 11 PZ7YMP 2019-10-11 19:47:16+03 DELTA STREAM 1/1 10s 376kB 32MB 1.00 0/3E000028 0/3F0000B8 OK - node 11 PZ7YK2 2019-10-11 19:45:45+03 FULL STREAM 1/0 11s 180MB 16MB 1.00 0/3C000028 0/3C000198 OK - node 11 PZ7YFO 2019-10-11 19:43:04+03 FULL STREAM 1/0 10s 30MB 16MB 1.00 0/2000028 0/200ADD8 OK + +====================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +====================================================================================================================================== + node 16 SCUN92 2024-05-02 11:22:16+03 DELTA STREAM 1/1 9s 1162kB 32MB 1.08 0/7C000028 0/7C000168 OK + node 16 SCUN8N 2024-05-02 11:22:09+03 FULL STREAM 1/0 12s 296MB 16MB 2.30 0/7A000028 0/7A009A08 OK + node 16 SCUN8I 2024-05-02 11:21:55+03 DELTA STREAM 1/1 5s 1148kB 32MB 1.01 0/78000028 0/78000168 OK + node 16 SCUN86 2024-05-02 11:21:47+03 DELTA STREAM 1/1 11s 120MB 16MB 2.27 0/76000028 0/760001A0 OK + node 16 SCUN7I 2024-05-02 11:21:29+03 FULL STREAM 1/0 22s 296MB 288MB 2.30 0/63012FE8 0/74E7ADA0 OK + node 16 SCUN71 2024-05-02 11:21:12+03 FULL STREAM 1/0 13s 296MB 272MB 2.30 0/49000028 0/573683B8 OK You can check the state of the WAL archive by running the @@ -3241,28 +3763,30 @@ BACKUP INSTANCE 'node' flag: -pg_probackup show -B backup_dir --instance node --archive +pg_probackup show -B backup_dir --instance=node --archive - + + ARCHIVE INSTANCE 'node' -=============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -=============================================================================================================================== - 1 0 0/0 000000010000000000000001 000000010000000000000047 71 36MB 31.00 6 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000048 00000001000000000000007C 53 848MB 1.00 6 OK WAL purge 
without cannot achieve much, only one segment is removed: -pg_probackup delete -B backup_dir --instance node --delete-wal +pg_probackup delete -B backup_dir --instance=node --delete-wal - + + ARCHIVE INSTANCE 'node' -=============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -=============================================================================================================================== - 1 0 0/0 000000010000000000000002 000000010000000000000047 70 34MB 32.00 6 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000049 00000001000000000000007C 52 832MB 1.00 6 OK If you would like, for example, to keep only those WAL @@ -3270,28 +3794,30 @@ ARCHIVE INSTANCE 'node' option to 1: -pg_probackup delete -B backup_dir --instance node --delete-wal --wal-depth=1 +pg_probackup delete -B backup_dir --instance=node --delete-wal --wal-depth=1 - + + ARCHIVE INSTANCE 'node' -================================================================================================================================ - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -================================================================================================================================ - 1 0 0/0 000000010000000000000046 000000010000000000000047 2 143kB 228.00 6 OK +=============================================================================================================================== + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +=============================================================================================================================== + 1 0 0/0 00000001000000000000007C 00000001000000000000007C 1 16MB 1.00 6 OK Alternatively, you can use the option with the command: -pg_probackup backup -B backup_dir --instance node -b DELTA --wal-depth=1 --delete-wal +pg_probackup backup -B backup_dir --instance=node -b DELTA --wal-depth=1 --delete-wal - + + ARCHIVE INSTANCE 'node' =============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status =============================================================================================================================== - 1 0 0/0 000000010000000000000048 000000010000000000000049 1 72kB 228.00 7 OK + 1 0 0/0 00000001000000000000007E 00000001000000000000007E 1 16MB 1.00 7 OK @@ -3305,7 +3831,7 @@ ARCHIVE INSTANCE 'node' recent incremental backup you would like to merge: -pg_probackup merge -B backup_dir --instance instance_name -i backup_id +pg_probackup merge -B backup_dir --instance=instance_name -i backup_id This command merges backups that belong to a common incremental backup @@ -3315,7 +3841,7 @@ pg_probackup merge -B backup_dir --instance pg_probackup in the remote mode. 
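
 As an illustration, take the backup chain from the JSON listing earlier
 (SCUN2V ← SCUN39 ← SCUN3M ← SCUN3Y): merging the most recent incremental
 backup collapses the whole chain into a single full backup (a sketch; the
 IDs are from that example):

pg_probackup merge -B backup_dir --instance=node -i SCUN3Y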
@@ -3327,7 +3853,7 @@ pg_probackup merge -B backup_dir --instance -pg_probackup show -B backup_dir --instance instance_name -i backup_id +pg_probackup show -B backup_dir --instance=instance_name -i backup_id If the merge is still in progress, the backup status is @@ -3345,7 +3871,7 @@ pg_probackup show -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name -i backup_id +pg_probackup delete -B backup_dir --instance=instance_name -i backup_id This command will delete the backup with the specified @@ -3361,7 +3887,7 @@ pg_probackup delete -B backup_dir --instance --delete-wal flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-wal +pg_probackup delete -B backup_dir --instance=instance_name --delete-wal To delete backups that are expired according to the current @@ -3369,7 +3895,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name --delete-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired Expired backups cannot be removed while at least one @@ -3380,7 +3906,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --merge-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --merge-expired In this case, pg_probackup searches for the oldest incremental @@ -3400,7 +3926,7 @@ pg_probackup delete -B backup_dir --instance --status: -pg_probackup delete -B backup_dir --instance instance_name --status=ERROR +pg_probackup delete -B backup_dir --instance=instance_name --status=ERROR @@ -3454,10 +3980,7 @@ pg_probackup delete -B backup_dir --instance DDL commands - CREATE TABLESPACE/DROP TABLESPACE + CREATE TABLESPACE/DROP TABLESPACE cannot be run simultaneously with catchup. @@ -3475,7 +3998,7 @@ pg_probackup delete -B backup_dir --instance To prepare for cloning/synchronizing a PostgreSQL instance, - set up the source instance server as follows: + set up the source server as follows: @@ -3498,7 +4021,7 @@ pg_probackup delete -B backup_dir --instance Before cloning/synchronizing a PostgreSQL instance, ensure that the source - instance server is running and accepting connections. To clone/sync a PostgreSQL instance, + server is running and accepting connections. To clone/sync a PostgreSQL instance, on the server with the destination instance, you can run the command as follows: @@ -3541,7 +4064,7 @@ pg_probackup catchup -b catchup_mode --source-pgdata= By specifying the option, you can set STREAM WAL delivery mode of copying, which will include all the necessary WAL files by streaming them from - the instance server via replication protocol. + the server via replication protocol. 
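
 For example, a DELTA catchup over SSH with STREAM WAL delivery might look
 as follows (a sketch only; the host name and both data directory paths are
 placeholders):

pg_probackup catchup -b DELTA --source-pgdata=/var/lib/postgresql/16/main --destination-pgdata=/var/lib/postgresql/16/standby --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb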
You can use connection_options to specify
@@ -3638,7 +4161,7 @@ pg_probackup init -B backup_dir [--help]

-pg_probackup add-instance -B backup_dir -D data_dir --instance instance_name [--help]
+pg_probackup add-instance -B backup_dir -D data_dir --instance=instance_name [--help]

 Initializes a new backup instance inside the backup catalog
@@ -3656,7 +4179,7 @@ pg_probackup add-instance -B backup_dir -D

-pg_probackup del-instance -B backup_dir --instance instance_name [--help]
+pg_probackup del-instance -B backup_dir --instance=instance_name [--help]

 Deletes all backups and WAL files associated with the
@@ -3666,7 +4189,7 @@ pg_probackup del-instance -B backup_dir --instance

-pg_probackup set-config -B backup_dir --instance instance_name
+pg_probackup set-config -B backup_dir --instance=instance_name
[--help] [--pgdata=pgdata-path]
[--retention-redundancy=redundancy][--retention-window=window][--wal-depth=wal_depth]
[--compress-algorithm=compression_algorithm] [--compress-level=compression_level]
@@ -3692,7 +4215,7 @@ pg_probackup set-config -B backup_dir --instance

-pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id
+pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id
{--ttl=ttl | --expire-time=time} [--note=backup_note] [--help]
@@ -3723,7 +4246,8 @@ pg_probackup set-backup -B backup_dir --instance

-pg_probackup show-config -B backup_dir --instance instance_name [--format=plain|json]
+pg_probackup show-config -B backup_dir --instance=instance_name [--format=plain|json]
+[--no-scale-units] [logging_options]

 Displays the contents of the pg_probackup.conf configuration
@@ -3734,6 +4258,18 @@ pg_probackup show-config -B backup_dir --instance
 JSON format. By default, configuration settings are shown as
 plain text.
+
+ You can also specify the --no-scale-units
+ option to display time and memory configuration settings in their base (unscaled) units.
+ Otherwise, the values are scaled to larger units for optimal display.
+ For example, if archive-timeout is 300, then
+ 5min is displayed, but if archive-timeout
+ is 301, then 301s is displayed.
+ Also, if the option is specified, configuration
+ settings are displayed without units; in the JSON format,
+ numeric and boolean values are not enclosed in quotes. This facilitates parsing
+ the output.
+
 To edit pg_probackup.conf, use the command.
@@ -3743,7 +4279,7 @@ pg_probackup show-config -B backup_dir --instance

 pg_probackup show -B backup_dir
-[--help] [--instance instance_name [-i backup_id | --archive]] [--format=plain|json] [--no-color]
+[--help] [--instance=instance_name [-i backup_id | --archive]] [--format=plain|json] [--no-color]

 Shows the contents of the backup catalog.
If
@@ -3772,7 +4308,7 @@ pg_probackup show -B backup_dir

-pg_probackup backup -B backup_dir -b backup_mode --instance instance_name
+pg_probackup backup -B backup_dir -b backup_mode --instance=instance_name
[--help] [-j num_threads] [--progress]
[-C] [--stream [-S slot_name] [--temp-slot]] [--backup-pg-log]
[--no-validate] [--skip-block-validation]
@@ -3955,7 +4491,7 @@ pg_probackup backup -B backup_dir -b bac

-pg_probackup restore -B backup_dir --instance instance_name
+pg_probackup restore -B backup_dir --instance=instance_name
[--help] [-D data_dir] [-i backup_id] [-j num_threads]
[--progress] [-T OLDDIR=NEWDIR] [--external-mapping=OLDDIR=NEWDIR] [--skip-external-dirs]
@@ -4166,7 +4702,7 @@ pg_probackup restore -B backup_dir --instance

 pg_probackup checkdb
-[-B backup_dir] [--instance instance_name] [-D data_dir]
+[-B backup_dir] [--instance=instance_name] [-D data_dir]
[--help] [-j num_threads] [--progress]
[--amcheck [--skip-block-validation] [--checkunique] [--heapallindexed]]
[connection_options] [logging_options]
@@ -4256,7 +4792,7 @@ pg_probackup checkdb

 pg_probackup validate -B backup_dir
-[--help] [--instance instance_name] [-i backup_id]
+[--help] [--instance=instance_name] [-i backup_id]
[-j num_threads] [--progress]
[--skip-block-validation] [recovery_target_options] [logging_options]
@@ -4284,7 +4820,7 @@ pg_probackup validate -B backup_dir

-pg_probackup merge -B backup_dir --instance instance_name -i backup_id
+pg_probackup merge -B backup_dir --instance=instance_name -i backup_id
[--help] [-j num_threads] [--progress]
[--no-validate] [--no-sync] [logging_options]
@@ -4328,7 +4864,7 @@ pg_probackup merge -B backup_dir --instance

-pg_probackup delete -B backup_dir --instance instance_name
+pg_probackup delete -B backup_dir --instance=instance_name
[--help] [-j num_threads] [--progress]
[--retention-redundancy=redundancy][--retention-window=window][--wal-depth=wal_depth] [--delete-wal]
{-i backup_id | --delete-expired [--merge-expired] | --merge-expired | --status=backup_status}
@@ -4375,7 +4911,7 @@ pg_probackup delete -B backup_dir --instance

-pg_probackup archive-push -B backup_dir --instance instance_name
+pg_probackup archive-push -B backup_dir --instance=instance_name
--wal-file-name=wal_file_name [--wal-file-path=wal_file_path]
[--help] [--no-sync] [--compress] [--no-ready-rename] [--overwrite]
[-j num_threads] [--batch-size=batch_size]
@@ -4441,7 +4977,7 @@ pg_probackup archive-push -B backup_dir --instance

-pg_probackup archive-get -B backup_dir --instance instance_name --wal-file-path=wal_file_path --wal-file-name=wal_file_name
+pg_probackup archive-get -B backup_dir --instance=instance_name --wal-file-path=wal_file_path --wal-file-name=wal_file_name
[-j num_threads] [--batch-size=batch_size]
[--prefetch-dir=prefetch_dir_path] [--no-validate-wal]
[--help] [remote_options] [logging_options]
@@ -4532,7 +5068,7 @@ pg_probackup catchup -b catchup_mode
 Copies the instance in STREAM WAL delivery mode,
 including all the necessary WAL files by streaming them from
- the instance server via replication protocol.
+ the server via replication protocol.
@@ -4797,8 +5333,7 @@ pg_probackup catchup -b catchup_mode
 Specifies the LSN of the write-ahead log location up to which
- recovery will proceed. Can be used only when restoring
- a database cluster of major version 10 or higher.
+ recovery will proceed.
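
 For example, to restore a cluster up to a specific LSN, you could run
 (a sketch; the LSN value is illustrative and would normally be taken from
 your backup metadata or WAL archive):

pg_probackup restore -B backup_dir --instance=node --recovery-target-lsn=0/4D0000B8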
@@ -4820,7 +5355,7 @@ pg_probackup catchup -b catchup_mode
 If the time zone offset is not specified, the local time zone is used.
-Example: --recovery-target-time="2020-01-01 00:00:00+03"
+Example: --recovery-target-time="2027-05-02 11:21:00+00"
@@ -5010,7 +5545,7 @@ pg_probackup catchup -b catchup_mode
 If the time zone offset is not specified, the local time zone is used.
-Example: --expire-time="2020-01-01 00:00:00+03"
+Example: --expire-time="2027-05-02 11:21:00+00"
@@ -5760,352 +6295,9 @@ pg_probackup catchup -b catchup_mode
-
-
- This section describes the options related to taking a backup
- from standby.
-
-
- Starting from pg_probackup 2.0.24, backups can be
- taken from standby without connecting to the master server,
- so these options are no longer required. In lower versions,
- pg_probackup had to connect to the master to determine
- recovery time — the earliest moment for which you can
- restore a consistent state of the database cluster.
-
-
- Deprecated. Specifies the name of the database on the master
- server to connect to. The connection is used only for managing
- the backup process, so you can connect to any existing
- database. Can be set in the pg_probackup.conf using the
- command.
-
- Default: postgres, the default PostgreSQL database
-
-
- Deprecated. Specifies the host name of the system on which the
- master server is running.
-
-
- Deprecated. Specifies the TCP port or the local Unix domain
- socket file extension on which the master server is listening
- for connections.
-
- Default: 5432, the PostgreSQL default port
-
-
- Deprecated. User name to connect as.
-
- Default: postgres,
- the PostgreSQL default user name
-
-
- Deprecated. Wait time for WAL segment streaming via
- replication, in seconds. By default, pg_probackup waits 300
- seconds. You can also define this parameter in the
- pg_probackup.conf configuration file using the
- command.
-
- Default: 300 sec
-
-
- All examples below assume the remote mode of operations via
- SSH. If you are planning to run backup and
- restore operation locally, skip the
- Setup passwordless SSH connection step
- and omit all options.
-
- Examples are based on Ubuntu 18.04,
- PostgreSQL 11, and pg_probackup
- 2.2.0.
-
- backup — PostgreSQL
- role used for connection to PostgreSQL
- cluster.
-
- backupdb — database used for connection
- to PostgreSQL cluster.
-
- backup_host — host with backup catalog.
-
- backupman — user on
- backup_host running all pg_probackup
- operations.
-
- /mnt/backups — directory on
- backup_host where backup catalog is stored.
-
- postgres_host — host with PostgreSQL
- cluster.
-
- postgres — user on
- postgres_host that has started the PostgreSQL cluster.
-
- /var/lib/postgresql/11/main — PostgreSQL
- data directory on postgres_host.
-
-
- This scenario illustrates setting up standalone FULL and DELTA backups.
-
-
-[backupman@backup_host] ssh-copy-id postgres@postgres_host
-
-
- For security purposes, it is recommended to use a separate
- database for backup operations.
-
-
-postgres=#
-CREATE DATABASE backupdb;
-
- Connect to the backupdb database, create the
- probackup role, and grant the following
- permissions to this role:
-
-backupdb=#
-BEGIN;
-CREATE ROLE backup WITH LOGIN REPLICATION;
-GRANT USAGE ON SCHEMA pg_catalog TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;
-GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup;
-COMMIT;
-
-
-[backupman@backup_host]$ pg_probackup-11 init -B /mnt/backups
-INFO: Backup catalog '/mnt/backups' successfully inited
-
-
-[backupman@backup_host]$ pg_probackup-11 add-instance -B /mnt/backups --instance pg-11 --remote-host=postgres_host --remote-user=postgres -D /var/lib/postgresql/11/main
-INFO: Instance 'node' successfully inited
-
-
-[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b FULL --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb
-INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YK2, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1
-INFO: Start transferring data files
-INFO: Data files are transferred
-INFO: wait for pg_stop_backup()
-INFO: pg_stop backup() successfully executed
-INFO: Validating backup PZ7YK2
-INFO: Backup PZ7YK2 data files are valid
-INFO: Backup PZ7YK2 resident size: 196MB
-INFO: Backup PZ7YK2 completed
-
-
-[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance pg-11
-
-BACKUP INSTANCE 'pg-11'
-==================================================================================================================================
- Instance  Version  ID      Recovery Time           Mode  WAL Mode  TLI  Time  Data   WAL   Zratio  Start LSN   Stop LSN    Status
-==================================================================================================================================
- node      11       PZ7YK2  2019-10-11 19:45:45+03  FULL  STREAM    1/0  11s   180MB  16MB  1.00    0/3C000028  0/3C000198  OK
-
-
-[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b delta --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb
-INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YMP, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1
-INFO: Parent backup: PZ7YK2
-INFO: Start transferring data files
-INFO: Data files are transferred
-INFO: wait for pg_stop_backup()
-INFO: pg_stop backup() successfully executed
-INFO: Validating backup PZ7YMP
-INFO: Backup PZ7YMP data files are valid
-INFO: Backup PZ7YMP resident size: 32MB
-INFO: Backup
PZ7YMP completed
-
-
-[backupman@backup_host] pg_probackup-11 set-config -B /mnt/backups --instance pg-11 --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb
-
-
-[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b delta --stream
-INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YR5, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1
-INFO: Parent backup: PZ7YMP
-INFO: Start transferring data files
-INFO: Data files are transferred
-INFO: wait for pg_stop_backup()
-INFO: pg_stop backup() successfully executed
-INFO: Validating backup PZ7YR5
-INFO: Backup PZ7YR5 data files are valid
-INFO: Backup PZ7YR5 resident size: 32MB
-INFO: Backup PZ7YR5 completed
-
-
-[backupman@backup_host] pg_probackup-11 show-config -B /mnt/backups --instance pg-11
-
-# Backup instance information
-pgdata = /var/lib/postgresql/11/main
-system-identifier = 6746586934060931492
-xlog-seg-size = 16777216
-# Connection parameters
-pgdatabase = backupdb
-pghost = postgres_host
-pguser = backup
-# Replica parameters
-replica-timeout = 5min
-# Archive parameters
-archive-timeout = 5min
-# Logging parameters
-log-level-console = INFO
-log-level-file = OFF
-log-format-console = PLAIN
-log-format-file = PLAIN
-log-filename = pg_probackup.log
-log-rotation-size = 0
-log-rotation-age = 0
-# Retention parameters
-retention-redundancy = 0
-retention-window = 0
-wal-depth = 0
-# Compression parameters
-compress-algorithm = none
-compress-level = 1
-# Remote access parameters
-remote-proto = ssh
-remote-host = postgres_host
-
- Note that we are getting the default values for other options
- that were not overwritten by the set-config command.
-
-
-
-
-[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance pg-11
-
-====================================================================================================================================
- Instance  Version  ID      Recovery Time           Mode   WAL Mode  TLI  Time  Data   WAL   Zratio  Start LSN   Stop LSN    Status
-====================================================================================================================================
- node      11       PZ7YR5  2019-10-11 19:49:56+03  DELTA  STREAM    1/1  10s   112kB  32MB  1.00    0/41000028  0/41000160  OK
- node      11       PZ7YMP  2019-10-11 19:47:16+03  DELTA  STREAM    1/1  10s   376kB  32MB  1.00    0/3E000028  0/3F0000B8  OK
- node      11       PZ7YK2  2019-10-11 19:45:45+03  FULL   STREAM    1/0  11s   180MB  16MB  1.00    0/3C000028  0/3C000198  OK
-
-
-
-
diff --git a/doc/stylesheet.css b/doc/stylesheet.css
index 4d84058f5..31464154b 100644
--- a/doc/stylesheet.css
+++ b/doc/stylesheet.css
@@ -119,7 +119,8 @@ body {
 }

 .book code, kbd, pre, samp {
-	font-family: monospace,monospace;
+	font-family: monospace,monospace;
+	font-size: 90%;
 }

 .book .txtCommentsWrap {
diff --git a/po/LINGUAS b/po/LINGUAS
new file mode 100644
index 000000000..562ba4cf0
--- /dev/null
+++ b/po/LINGUAS
@@ -0,0 +1 @@
+ru
diff --git a/src/archive.c b/src/archive.c
index 734602cac..7d753c8b3 100644
--- a/src/archive.c
+++ b/src/archive.c
@@ -13,14 +13,6 @@
 #include "utils/thread.h"
 #include "instr_time.h"

-static int push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_dir,
-                                           const char *archive_dir, bool overwrite, bool no_sync,
-                                           uint32 archive_timeout);
-#ifdef HAVE_LIBZ
-static int push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir,
-                                 const char *archive_dir, bool overwrite, bool no_sync,
-                                 int compress_level, uint32 archive_timeout);
-#endif
 static void *push_files(void *arg);
 static void *get_files(void *arg);
 static bool get_wal_file(const char *filename, const char *from_path, const char *to_path,
@@ -91,8 +83,19 @@ typedef struct WALSegno
 {
 	char        name[MAXFNAMELEN];
 	volatile    pg_atomic_flag lock;
+	volatile    pg_atomic_uint32 done;
+	struct WALSegno* prev;
 } WALSegno;

+static int push_file_internal_uncompressed(WALSegno *wal_file, const char *pg_xlog_dir,
+                                           const char *archive_dir, bool overwrite, bool no_sync,
+                                           uint32 archive_timeout);
+#ifdef HAVE_LIBZ
+static int push_file_internal_gz(WALSegno *wal_file, const char *pg_xlog_dir,
+                                 const char *archive_dir, bool overwrite, bool no_sync,
+                                 int compress_level, uint32 archive_timeout);
+#endif
+
 static int push_file(WALSegno *xlogfile, const char *archive_status_dir,
                      const char *pg_xlog_dir, const char *archive_dir,
                      bool overwrite, bool no_sync, uint32 archive_timeout,
@@ -337,16 +340,18 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir,

 	/* If compression is not required, then just copy it as is */
 	if (!is_compress)
-		rc = push_file_internal_uncompressed(xlogfile->name, pg_xlog_dir,
+		rc = push_file_internal_uncompressed(xlogfile, pg_xlog_dir,
 											 archive_dir, overwrite, no_sync,
 											 archive_timeout);
 #ifdef HAVE_LIBZ
 	else
-		rc = push_file_internal_gz(xlogfile->name, pg_xlog_dir, archive_dir,
+		rc = push_file_internal_gz(xlogfile, pg_xlog_dir, archive_dir,
 								   overwrite, no_sync, compress_level,
 								   archive_timeout);
 #endif

+	pg_atomic_write_u32(&xlogfile->done, 1);
+
 	/* take '--no-ready-rename' flag into account */
 	if (!no_ready_rename && archive_status_dir != NULL)
 	{
@@ -381,13 +386,14 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir,
 * has the
same checksum */ int -push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_dir, +push_file_internal_uncompressed(WALSegno *wal_file, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, uint32 archive_timeout) { FILE *in = NULL; int out = -1; char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ + const char *wal_file_name = wal_file->name; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; /* partial handling */ @@ -409,7 +415,10 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Open source file for read */ in = fopen(from_fullpath, PG_BINARY_R); if (in == NULL) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open source file \"%s\": %s", from_fullpath, strerror(errno)); + } /* disable stdio buffering for input file */ setvbuf(in, NULL, _IONBF, BUFSIZ); @@ -422,8 +431,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } /* Already existing destination temp file is not an error condition */ } else @@ -453,15 +465,21 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } else /* Successfully created partial file */ break; } else + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot stat temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } /* first round */ @@ -492,8 +510,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (!partial_is_stale) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", to_fullpath_part, archive_timeout); + } /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_part); @@ -501,7 +522,10 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d out = fio_open(to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, FIO_BACKUP_HOST); if (out < 0) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } part_opened: @@ -536,6 +560,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d * so we must unlink partial file and exit with error. 
*/ fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath); } @@ -553,6 +578,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (ferror(in)) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot read source file \"%s\": %s", from_fullpath, strerror(errno)); } @@ -560,6 +586,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (read_len > 0 && fio_write_async(out, buf, read_len) != read_len) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to destination temp file \"%s\": %s", to_fullpath_part, strerror(errno)); } @@ -575,14 +602,29 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (fio_check_error_fd(out, &errmsg)) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to the remote file \"%s\": %s", to_fullpath_part, errmsg); } + if (wal_file->prev != NULL) + { + while (!pg_atomic_read_u32(&wal_file->prev->done)) + { + if (thread_interrupted || interrupted) + { + pg_atomic_write_u32(&wal_file->done, 1); + elog(ERROR, "Terminated while waiting for prev file"); + } + usleep(250); + } + } + /* close temp file */ if (fio_close(out) != 0) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot close temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); } @@ -591,8 +633,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (!no_sync) { if (fio_sync(to_fullpath_part, FIO_BACKUP_HOST) != 0) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } elog(LOG, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath); @@ -603,6 +648,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (fio_rename(to_fullpath_part, to_fullpath, FIO_BACKUP_HOST) < 0) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", to_fullpath_part, to_fullpath, strerror(errno)); } @@ -620,13 +666,14 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d * has the same checksum */ int -push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, +push_file_internal_gz(WALSegno *wal_file, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, int compress_level, uint32 archive_timeout) { FILE *in = NULL; gzFile out = NULL; char *buf = pgut_malloc(OUT_BUF_SIZE); + const char *wal_file_name = wal_file->name; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; char to_fullpath_gz[MAXPGPATH]; @@ -656,8 +703,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Open source file for read */ in = fopen(from_fullpath, PG_BINARY_R); if (in == NULL) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open source WAL file \"%s\": %s", from_fullpath, strerror(errno)); + } /* disable stdio buffering for input file */ setvbuf(in, NULL, _IONBF, BUFSIZ); @@ -667,8 +717,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (errno != EEXIST) + { + 
pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } /* Already existing destination temp file is not an error condition */ } else @@ -698,16 +751,22 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } else /* Successfully created partial file */ break; } else + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot stat temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } /* first round */ @@ -738,8 +797,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (!partial_is_stale) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", to_fullpath_gz_part, archive_timeout); + } /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_gz_part); @@ -747,8 +809,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, out = fio_gzopen(to_fullpath_gz_part, PG_BINARY_W, compress_level, FIO_BACKUP_HOST); if (out == NULL) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } part_opened: @@ -784,6 +849,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, * so we must unlink partial file and exit with error. */ fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath_gz); } @@ -801,6 +867,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (ferror(in)) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot read from source file \"%s\": %s", from_fullpath, strerror(errno)); } @@ -808,6 +875,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (read_len > 0 && fio_gzwrite(out, buf, read_len) != read_len) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, get_gz_error(out, errno)); } @@ -823,14 +891,29 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (fio_check_error_fd_gz(out, &errmsg)) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to the remote compressed file \"%s\": %s", to_fullpath_gz_part, errmsg); } + if (wal_file->prev != NULL) + { + while (!pg_atomic_read_u32(&wal_file->prev->done)) + { + if (thread_interrupted || interrupted) + { + pg_atomic_write_u32(&wal_file->done, 1); + elog(ERROR, "Terminated while waiting for prev file"); + } + usleep(250); + } + } + /* close temp file, TODO: make it synchronous */ if (fio_gzclose(out) != 0) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot close compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); } @@ -839,8 +922,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (!no_sync) { if (fio_sync(to_fullpath_gz_part, FIO_BACKUP_HOST) != 0) + { + 
pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } elog(LOG, "Rename \"%s\" to \"%s\"", @@ -852,6 +938,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (fio_rename(to_fullpath_gz_part, to_fullpath_gz, FIO_BACKUP_HOST) < 0) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", to_fullpath_gz_part, to_fullpath_gz, strerror(errno)); } @@ -905,6 +992,15 @@ get_gz_error(gzFile gzf, int errnum) // } //} +static int +walSegnoCompareName(const void *f1, const void *f2) +{ + WALSegno *w1 = *(WALSegno**)f1; + WALSegno *w2 = *(WALSegno**)f2; + + return strcmp(w1->name, w2->name); +} + /* Look for files with '.ready' suffix in archive_status directory * and pack such files into batch sized array. */ @@ -912,14 +1008,15 @@ parray * setup_push_filelist(const char *archive_status_dir, const char *first_file, int batch_size) { - int i; WALSegno *xlogfile = NULL; parray *status_files = NULL; parray *batch_files = parray_new(); + size_t i; /* guarantee that first filename is in batch list */ - xlogfile = palloc(sizeof(WALSegno)); + xlogfile = palloc0(sizeof(WALSegno)); pg_atomic_init_flag(&xlogfile->lock); + pg_atomic_init_u32(&xlogfile->done, 0); snprintf(xlogfile->name, MAXFNAMELEN, "%s", first_file); parray_append(batch_files, xlogfile); @@ -950,8 +1047,9 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, if (strcmp(filename, first_file) == 0) continue; - xlogfile = palloc(sizeof(WALSegno)); + xlogfile = palloc0(sizeof(WALSegno)); pg_atomic_init_flag(&xlogfile->lock); + pg_atomic_init_u32(&xlogfile->done, 0); snprintf(xlogfile->name, MAXFNAMELEN, "%s", filename); parray_append(batch_files, xlogfile); @@ -960,6 +1058,13 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, break; } + parray_qsort(batch_files, walSegnoCompareName); + for (i = 1; i < parray_num(batch_files); i++) + { + xlogfile = (WALSegno*) parray_get(batch_files, i); + xlogfile->prev = (WALSegno*) parray_get(batch_files, i-1); + } + /* cleanup */ parray_walk(status_files, pgFileFree); parray_free(status_files); diff --git a/src/backup.c b/src/backup.c index 35fc98092..78c3512e9 100644 --- a/src/backup.c +++ b/src/backup.c @@ -13,6 +13,9 @@ #if PG_VERSION_NUM < 110000 #include "catalog/catalog.h" #endif +#if PG_VERSION_NUM < 120000 +#include "access/transam.h" +#endif #include "catalog/pg_tablespace.h" #include "pgtar.h" #include "streamutil.h" @@ -65,7 +68,10 @@ static bool pg_is_in_recovery(PGconn *conn); static bool pg_is_superuser(PGconn *conn); static void check_server_version(PGconn *conn, PGNodeInfo *nodeInfo); static void confirm_block_size(PGconn *conn, const char *name, int blcksz); -static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); +static void rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); +static bool remove_excluded_files_criterion(void *value, void *exclude_args); +static void backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments); +static void process_file(int i, pgFile *file, backup_files_arg *arguments); static StopBackupCallbackParams stop_callback_params; @@ -78,7 +84,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) */ if (backup_in_progress) { - elog(WARNING, "backup in progress, stop backup"); + elog(WARNING, "A backup is in progress, stopping it."); /* 
don't care about stop_lsn in case of error */
 		pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL);
 	}
@@ -116,6 +122,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
 	char		pretty_time[20];
 	char		pretty_bytes[20];

+	pgFile	   *src_pg_control_file = NULL;
+
 	elog(INFO, "Database backup start");
 	if(current.external_dir_str)
 	{
@@ -418,6 +426,24 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,

 	}

+	/*
+	 * find pg_control file
+	 * We'll copy it last
+	 */
+	{
+		int			control_file_elem_index;
+		pgFile		search_key;
+		MemSet(&search_key, 0, sizeof(pgFile));
+		/* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparison */
+		search_key.rel_path = XLOG_CONTROL_FILE;
+		search_key.external_dir_num = 0;
+		control_file_elem_index = parray_bsearch_index(backup_files_list, &search_key, pgFileCompareRelPathWithExternal);
+
+		if (control_file_elem_index < 0)
+			elog(ERROR, "File \"%s\" not found in PGDATA %s", XLOG_CONTROL_FILE, current.database_dir);
+		src_pg_control_file = (pgFile *)parray_get(backup_files_list, control_file_elem_index);
+	}
+
 	/* setup thread locks */
 	pfilearray_clear_locks(backup_files_list);
@@ -477,6 +503,26 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
 		backup_isok = false;
 	}

+	/* copy pg_control at very end */
+	if (backup_isok)
+	{
+
+		elog(progress ? INFO : LOG, "Progress: Backup file \"%s\"",
+			 src_pg_control_file->rel_path);
+
+		char		from_fullpath[MAXPGPATH];
+		char		to_fullpath[MAXPGPATH];
+		join_path_components(from_fullpath, instance_config.pgdata, src_pg_control_file->rel_path);
+		join_path_components(to_fullpath, current.database_dir, src_pg_control_file->rel_path);
+
+		backup_non_data_file(src_pg_control_file, NULL,
+							 from_fullpath, to_fullpath,
+							 current.backup_mode, current.parent_backup,
+							 true);
+	}
+
+
 	time(&end_time);
 	pretty_time_interval(difftime(end_time, start_time),
 						 pretty_time, lengthof(pretty_time));
@@ -504,17 +550,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn,
 	{
 		pgFile	   *pg_control = NULL;

-		for (i = 0; i < parray_num(backup_files_list); i++)
-		{
-			pgFile	   *tmp_file = (pgFile *) parray_get(backup_files_list, i);
+		pg_control = src_pg_control_file;

-			if (tmp_file->external_dir_num == 0 &&
-				(strcmp(tmp_file->rel_path, XLOG_CONTROL_FILE) == 0))
-			{
-				pg_control = tmp_file;
-				break;
-			}
-		}

 		if (!pg_control)
 			elog(ERROR, "Failed to find file \"%s\" in backup filelist.",
@@ -705,8 +742,9 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params,
 	char		pretty_bytes[20];

 	if (!instance_config.pgdata)
-		elog(ERROR, "required parameter not specified: PGDATA "
-			 "(-D, --pgdata)");
+		elog(ERROR, "No postgres data directory specified.\n"
+			 "Please specify it either using environment variable PGDATA or\n"
+			 "command line option --pgdata (-D)");

 	/* Initialize PGInfonode */
 	pgNodeInit(&nodeInfo);
@@ -930,12 +968,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo)

 	if (nodeInfo->server_version < 90500)
 		elog(ERROR,
-			 "server version is %s, must be %s or higher",
+			 "Server version is %s, must be %s or higher",
 			 nodeInfo->server_version_str, "9.5");

 	if (current.from_replica && nodeInfo->server_version < 90600)
 		elog(ERROR,
-			 "server version is %s, must be %s or higher for backup from replica",
+			 "Server version is %s, must be %s or higher for backup from replica",
 			 nodeInfo->server_version_str, "9.6");

 	if (nodeInfo->pgpro_support)
@@ -1044,7 +1082,7 @@ confirm_block_size(PGconn *conn, const char *name, int blcksz)
res = pgut_execute(conn, "SELECT pg_catalog.current_setting($1)", 1, &name); if (PQntuples(res) != 1 || PQnfields(res) != 1) - elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(conn)); + elog(ERROR, "Cannot get %s: %s", name, PQerrorMessage(conn)); block_size = strtol(PQgetvalue(res, 0, 0), &endp, 10); if ((endp && *endp) || block_size != blcksz) @@ -1433,7 +1471,7 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l } if (!current.stream && is_start_lsn && try_count == 30) - elog(WARNING, "By default pg_probackup assume WAL delivery method to be ARCHIVE. " + elog(WARNING, "By default pg_probackup assumes the WAL delivery method to be ARCHIVE. " "If continuous archiving is not set up, use '--stream' option to make autonomous backup. " "Otherwise check that continuous archiving works correctly."); @@ -1769,9 +1807,9 @@ pg_stop_backup_consume(PGconn *conn, int server_version, { pgut_cancel(conn); #if PG_VERSION_NUM >= 150000 - elog(ERROR, "interrupted during waiting for pg_backup_stop"); + elog(ERROR, "Interrupted while waiting for pg_backup_stop"); #else - elog(ERROR, "interrupted during waiting for pg_stop_backup"); + elog(ERROR, "Interrupted while waiting for pg_stop_backup"); #endif } @@ -1817,7 +1855,7 @@ pg_stop_backup_consume(PGconn *conn, int server_version, case PGRES_TUPLES_OK: break; default: - elog(ERROR, "query failed: %s query was: %s", + elog(ERROR, "Query failed: %s query was: %s", PQerrorMessage(conn), query_text); } backup_in_progress = false; @@ -1828,13 +1866,13 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* get&check recovery_xid */ if (sscanf(PQgetvalue(query_result, 0, recovery_xid_colno), XID_FMT, &result->snapshot_xid) != 1) elog(ERROR, - "result of txid_snapshot_xmax() is invalid: %s", + "Result of txid_snapshot_xmax() is invalid: %s", PQgetvalue(query_result, 0, recovery_xid_colno)); /* get&check recovery_time */ if (!parse_time(PQgetvalue(query_result, 0, recovery_time_colno), &result->invocation_time, true)) elog(ERROR, - "result of current_timestamp is invalid: %s", + "Result of current_timestamp is invalid: %s", PQgetvalue(query_result, 0, recovery_time_colno)); /* get stop_backup_lsn */ @@ -1892,13 +1930,13 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c join_path_components(full_filename, path, filename); fp = fio_fopen(full_filename, PG_BINARY_W, FIO_BACKUP_HOST); if (fp == NULL) - elog(ERROR, "can't open %s file \"%s\": %s", + elog(ERROR, "Can't open %s file \"%s\": %s", error_msg_filename, full_filename, strerror(errno)); if (fio_fwrite(fp, data, len) != len || fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "can't write %s file \"%s\": %s", + elog(ERROR, "Can't write %s file \"%s\": %s", error_msg_filename, full_filename, strerror(errno)); /* @@ -1937,7 +1975,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb /* Remove it ?
*/ if (!backup_in_progress) - elog(ERROR, "backup is not in progress"); + elog(ERROR, "Backup is not in progress"); pg_silent_client_messages(pg_startbackup_conn); @@ -2054,8 +2092,6 @@ static void * backup_files(void *arg) { int i; - char from_fullpath[MAXPGPATH]; - char to_fullpath[MAXPGPATH]; static time_t prev_time; backup_files_arg *arguments = (backup_files_arg *) arg; @@ -2067,11 +2103,17 @@ backup_files(void *arg) for (i = 0; i < n_backup_files_list; i++) { pgFile *file = (pgFile *) parray_get(arguments->files_list, i); - pgFile *prev_file = NULL; /* We have already copied all directories */ if (S_ISDIR(file->mode)) continue; + /* + * Don't copy the pg_control file now, we'll copy it last + */ + if(file->external_dir_num == 0 && pg_strcasecmp(file->rel_path, XLOG_CONTROL_FILE) == 0) + { + continue; + } if (arguments->thread_num == 1) { @@ -2087,99 +2129,179 @@ backup_files(void *arg) } } + if (file->skip_cfs_nested) + continue; + if (!pg_atomic_test_set_flag(&file->lock)) continue; /* check for interrupt */ if (interrupted || thread_interrupted) - elog(ERROR, "interrupted during backup"); + elog(ERROR, "Interrupted during backup"); elog(progress ? INFO : LOG, "Progress: (%d/%d). Process file \"%s\"", i + 1, n_backup_files_list, file->rel_path); - /* Handle zero sized files */ - if (file->size == 0) - { - file->write_size = 0; - continue; - } - - /* construct destination filepath */ - if (file->external_dir_num == 0) + if (file->is_cfs) { - join_path_components(from_fullpath, arguments->from_root, file->rel_path); - join_path_components(to_fullpath, arguments->to_root, file->rel_path); + backup_cfs_segment(i, file, arguments); } else { - char external_dst[MAXPGPATH]; - char *external_path = parray_get(arguments->external_dirs, - file->external_dir_num - 1); + process_file(i, file, arguments); + } + } + + /* ssh connection no longer needed */ + fio_disconnect(); + + /* Data file transfer is successful */ + arguments->ret = 0; + + return NULL; +}
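After this refactoring, `backup_files()` only claims work: each thread walks the shared list, skips directories, pg_control and nested CFS companion files, and takes ownership of a file only if it is the first to set the file's atomic lock flag; the per-file logic moves to `process_file()` below. A self-contained sketch of that claim step, using C11 `atomic_flag` in place of `pg_atomic_test_set_flag()` (the types and names here are illustrative, not pg_probackup's):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NFILES 8

typedef struct WorkItem
{
    const char *rel_path;
    atomic_flag lock;           /* set by the first thread that claims the item */
} WorkItem;

static WorkItem items[NFILES];

static void *
worker(void *arg)
{
    long id = (long) arg;
    for (int i = 0; i < NFILES; i++)
    {
        /* test-and-set returns the previous value: true means already claimed */
        if (atomic_flag_test_and_set(&items[i].lock))
            continue;
        printf("thread %ld processes %s\n", id, items[i].rel_path);
    }
    return NULL;
}

int
main(void)
{
    static const char *names[NFILES] = {
        "base/1/1", "base/1/2", "base/1/3", "base/1/4",
        "base/1/5", "base/1/6", "base/1/7", "global/pg_control"
    };
    pthread_t t1, t2;

    for (int i = 0; i < NFILES; i++)
    {
        items[i].rel_path = names[i];
        atomic_flag_clear(&items[i].lock);
    }
    pthread_create(&t1, NULL, worker, (void *) 1L);
    pthread_create(&t2, NULL, worker, (void *) 2L);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}
```

Because the claim is a single atomic operation, no file is ever processed twice regardless of how the threads interleave.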
+ +static void +process_file(int i, pgFile *file, backup_files_arg *arguments) +{ + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + pgFile *prev_file = NULL; + + elog(progress ? INFO : LOG, "Progress: (%d/%zu). Process file \"%s\"", + i + 1, parray_num(arguments->files_list), file->rel_path); - makeExternalDirPathByNum(external_dst, + /* Handle zero sized files */ + if (file->size == 0) + { + file->write_size = 0; + return; + } + + /* construct from_fullpath & to_fullpath */ + if (file->external_dir_num == 0) + { + join_path_components(from_fullpath, arguments->from_root, file->rel_path); + join_path_components(to_fullpath, arguments->to_root, file->rel_path); + } + else + { + char external_dst[MAXPGPATH]; + char *external_path = parray_get(arguments->external_dirs, + file->external_dir_num - 1); + + makeExternalDirPathByNum(external_dst, arguments->external_prefix, file->external_dir_num); - join_path_components(to_fullpath, external_dst, file->rel_path); - join_path_components(from_fullpath, external_path, file->rel_path); - } - - /* Encountered some strange beast */ - if (!S_ISREG(file->mode)) - elog(WARNING, "Unexpected type %d of file \"%s\", skipping", - file->mode, from_fullpath); + join_path_components(to_fullpath, external_dst, file->rel_path); + join_path_components(from_fullpath, external_path, file->rel_path); + } - /* Check that file exist in previous backup */ - if (current.backup_mode != BACKUP_MODE_FULL) - { - pgFile **prev_file_tmp = NULL; - prev_file_tmp = (pgFile **) parray_bsearch(arguments->prev_filelist, - file, pgFileCompareRelPathWithExternal); - if (prev_file_tmp) - { - /* File exists in previous backup */ - file->exists_in_prev = true; - prev_file = *prev_file_tmp; - } - } + /* Encountered some strange beast */ + if (!S_ISREG(file->mode)) + { + elog(WARNING, "Unexpected type %d of file \"%s\", skipping", + file->mode, from_fullpath); + return; + } - /* backup file */ - if (file->is_datafile && !file->is_cfs) - { - backup_data_file(file, from_fullpath, to_fullpath, - arguments->prev_start_lsn, - current.backup_mode, - instance_config.compress_alg, - instance_config.compress_level, - arguments->nodeInfo->checksum_version, - arguments->hdr_map, false); - } - else + /* Check that file exists in previous backup */ + if (current.backup_mode != BACKUP_MODE_FULL) + { + pgFile **prevFileTmp = NULL; + prevFileTmp = (pgFile **) parray_bsearch(arguments->prev_filelist, + file, pgFileCompareRelPathWithExternal); + if (prevFileTmp) { - backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, - current.backup_mode, current.parent_backup, true); + /* File exists in previous backup */ + file->exists_in_prev = true; + prev_file = *prevFileTmp; } + } - if (file->write_size == FILE_NOT_FOUND) - continue; + /* backup file */ + if (file->is_datafile && !file->is_cfs) + { + backup_data_file(file, from_fullpath, to_fullpath, + arguments->prev_start_lsn, + current.backup_mode, + instance_config.compress_alg, + instance_config.compress_level, + arguments->nodeInfo->checksum_version, + arguments->hdr_map, false); + } + else + { + backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, + current.backup_mode, current.parent_backup, true); + } - if (file->write_size == BYTES_INVALID) - { - elog(LOG, "Skipping the unchanged file: \"%s\"", from_fullpath); - continue; - } + if (file->write_size == FILE_NOT_FOUND) + return; - elog(LOG, "File \"%s\". Copied "INT64_FORMAT " bytes", - from_fullpath, file->write_size); + if (file->write_size == BYTES_INVALID) + { + elog(LOG, "Skipping the unchanged file: \"%s\"", from_fullpath); + return; } - /* ssh connection to longer needed */ - fio_disconnect(); + elog(LOG, "File \"%s\".
Copied "INT64_FORMAT " bytes", + from_fullpath, file->write_size); - /* Data files transferring is successful */ - arguments->ret = 0; +} - return NULL; +static void +backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments) { + pgFile *data_file = file; + pgFile *cfm_file = NULL; + pgFile *data_bck_file = NULL; + pgFile *cfm_bck_file = NULL; + + while (data_file->cfs_chain) + { + data_file = data_file->cfs_chain; + if (data_file->forkName == cfm) + cfm_file = data_file; + if (data_file->forkName == cfs_bck) + data_bck_file = data_file; + if (data_file->forkName == cfm_bck) + cfm_bck_file = data_file; + } + data_file = file; + if (data_file->relOid >= FirstNormalObjectId && cfm_file == NULL) + { + elog(ERROR, "'CFS' file '%s' have to have '%s.cfm' companion file", + data_file->rel_path, data_file->name); + } + + elog(LOG, "backup CFS segment %s, data_file=%s, cfm_file=%s, data_bck_file=%s, cfm_bck_file=%s", + data_file->name, data_file->name, cfm_file->name, data_bck_file == NULL? "NULL": data_bck_file->name, cfm_bck_file == NULL? "NULL": cfm_bck_file->name); + + /* storing cfs segment. processing corner case [PBCKP-287] stage 1. + * - when we do have data_bck_file we should skip both data_bck_file and cfm_bck_file if exists. + * they are removed by cfs_recover() during postgres start. + */ + if (data_bck_file) + { + if (cfm_bck_file) + cfm_bck_file->write_size = FILE_NOT_FOUND; + data_bck_file->write_size = FILE_NOT_FOUND; + } + /* else we store cfm_bck_file. processing corner case [PBCKP-287] stage 2. + * - when we do have cfm_bck_file only we should store it. + * it will replace cfm_file after postgres start. + */ + else if (cfm_bck_file) + process_file(i, cfm_bck_file, arguments); + + /* storing cfs segment in order cfm_file -> datafile to guarantee their consistency */ + /* cfm_file could be NULL for system tables. But we don't clear is_cfs flag + * for compatibility with older pg_probackup. 
/* @@ -2209,11 +2331,12 @@ parse_filelist_filenames(parray *files, const char *root) */ if (strcmp(file->name, "pg_compression") == 0) { + /* processing potential cfs tablespace */ Oid tblspcOid; Oid dbOid; char tmp_rel_path[MAXPGPATH]; /* - * Check that the file is located under + * Check that pg_compression is located under * TABLESPACE_VERSION_DIRECTORY */ sscanf_result = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s/%u", @@ -2222,8 +2345,10 @@ parse_filelist_filenames(parray *files, const char *root) /* Yes, it is */ if (sscanf_result == 2 && strncmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY, - strlen(TABLESPACE_VERSION_DIRECTORY)) == 0) - set_cfs_datafiles(files, root, file->rel_path, i); + strlen(TABLESPACE_VERSION_DIRECTORY)) == 0) { + /* rewind index to the beginning of cfs tablespace */ + rewind_and_mark_cfs_datafiles(files, root, file->rel_path, i); + } } } @@ -2238,7 +2363,7 @@ parse_filelist_filenames(parray *files, const char *root) */ int unlogged_file_num = i - 1; pgFile *unlogged_file = (pgFile *) parray_get(files, - unlogged_file_num); + unlogged_file_num); unlogged_file_reloid = file->relOid; @@ -2246,11 +2371,10 @@ parse_filelist_filenames(parray *files, const char *root) (unlogged_file_reloid != 0) && (unlogged_file->relOid == unlogged_file_reloid)) { - pgFileFree(unlogged_file); - parray_remove(files, unlogged_file_num); + /* flagged to remove from list on stage 2 */ + unlogged_file->remove_from_list = true; unlogged_file_num--; - i--; unlogged_file = (pgFile *) parray_get(files, unlogged_file_num); @@ -2260,6 +2384,22 @@ parse_filelist_filenames(parray *files, const char *root) i++; } + + /* stage 2.
clean up from temporary tables */ + parray_remove_if(files, remove_excluded_files_criterion, NULL, pgFileFree); +} + +static bool +remove_excluded_files_criterion(void *value, void *exclude_args) { + pgFile *file = (pgFile*)value; + return file->remove_from_list; +} + +static uint32 +hash_rel_seg(pgFile* file) +{ + uint32 hash = hash_mix32_2(file->relOid, file->segno); + return hash_mix32_2(hash, 0xcf5); } /* If file is equal to pg_compression, then we consider this tablespace as @@ -2273,14 +2413,27 @@ parse_filelist_filenames(parray *files, const char *root) * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid/1 * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid/1.cfm * tblspcOid/TABLESPACE_VERSION_DIRECTORY/pg_compression + * + * Rewinds the list index to the first tablespace entry, i.e. tblspcOid/TABLESPACE_VERSION_DIRECTORY, + * and marks the CFS data files found along the way. */ static void -set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) +rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) { int len; int p; + int j; pgFile *prev_file; + pgFile *tmp_file; char *cfs_tblspc_path; + uint32 h; + + /* hash table for cfm files */ +#define HASHN 128 + parray *hashtab[HASHN] = {NULL}; + parray *bucket; + for (p = 0; p < HASHN; p++) + hashtab[p] = parray_new(); + cfs_tblspc_path = strdup(relative); if(!cfs_tblspc_path) @@ -2295,21 +2448,60 @@ set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) elog(LOG, "Checking file in cfs tablespace %s", prev_file->rel_path); - if (strstr(prev_file->rel_path, cfs_tblspc_path) != NULL) + if (strstr(prev_file->rel_path, cfs_tblspc_path) == NULL) + { + elog(LOG, "Breaking on %s", prev_file->rel_path); + break; + } + + if (!S_ISREG(prev_file->mode)) + continue; + + h = hash_rel_seg(prev_file); + bucket = hashtab[h % HASHN]; + + if (prev_file->forkName == cfm || prev_file->forkName == cfm_bck || + prev_file->forkName == cfs_bck) + { + prev_file->skip_cfs_nested = true; + parray_append(bucket, prev_file); + } + else if (prev_file->is_datafile && prev_file->forkName == none) { - if (S_ISREG(prev_file->mode) && prev_file->is_datafile) + elog(LOG, "Processing 'cfs' file %s", prev_file->rel_path); + /* have to mark as is_cfs even for system-tables for compatibility + * with older pg_probackup */ + prev_file->is_cfs = true; + prev_file->cfs_chain = NULL; + for (j = 0; j < parray_num(bucket); j++) { - elog(LOG, "Setting 'is_cfs' on file %s, name %s", - prev_file->rel_path, prev_file->name); - prev_file->is_cfs = true; + tmp_file = parray_get(bucket, j); + elog(LOG, "Linking 'cfs' file '%s' to '%s'", + tmp_file->rel_path, prev_file->rel_path); + if (tmp_file->relOid == prev_file->relOid && + tmp_file->segno == prev_file->segno) + { + tmp_file->cfs_chain = prev_file->cfs_chain; + prev_file->cfs_chain = tmp_file; + parray_remove(bucket, j); + j--; + } } } - else + } + + for (p = 0; p < HASHN; p++) + { + bucket = hashtab[p]; + for (j = 0; j < parray_num(bucket); j++) { - elog(LOG, "Breaking on %s", prev_file->rel_path); - break; + tmp_file = parray_get(bucket, j); + elog(WARNING, "Orphaned cfs related file '%s'", tmp_file->rel_path); } + parray_free(bucket); + hashtab[p] = NULL; } +#undef HASHN free(cfs_tblspc_path); }
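The rewritten `rewind_and_mark_cfs_datafiles()` above first drops every companion fork into one of 128 buckets keyed by a hash of (relOid, segno), then, when it meets the matching data segment, pulls companions out of that bucket and links them into the segment's `cfs_chain`; whatever is left in the buckets at the end is reported as orphaned. A compact sketch of the bucket addressing; the mixer below is an illustrative assumption, since `hash_mix32_2()` is defined elsewhere in the tree:

```c
#include <stdio.h>
#include <stdint.h>

#define HASHN 128

typedef struct Seg { uint32_t relOid; uint32_t segno; } Seg;

/* assumption: any reasonable 2-input 32-bit mixer works here */
static uint32_t
mix32_2(uint32_t a, uint32_t b)
{
    uint32_t h = a * 0x9E3779B9u ^ b;
    h ^= h >> 16;
    h *= 0x85EBCA6Bu;
    h ^= h >> 13;
    return h;
}

static uint32_t
hash_seg(const Seg *s)
{
    /* same shape as hash_mix32_2(hash_mix32_2(relOid, segno), 0xcf5) */
    return mix32_2(mix32_2(s->relOid, s->segno), 0xcf5);
}

int
main(void)
{
    Seg a     = {16384, 0};
    Seg a_cfm = {16384, 0};   /* the .cfm companion hashes identically */
    Seg b     = {16385, 1};

    printf("bucket(a)     = %u\n", hash_seg(&a) % HASHN);
    printf("bucket(a.cfm) = %u\n", hash_seg(&a_cfm) % HASHN);
    printf("bucket(b)     = %u\n", hash_seg(&b) % HASHN);
    return 0;
}
```

Because a segment and its companions share a key, each data file only has to scan one small bucket instead of the whole tablespace listing.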
diff --git a/src/catalog.c b/src/catalog.c index 92a2d84b7..b29090789 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -891,7 +891,7 @@ catalog_get_instance_list(CatalogState *catalogState) instanceState = pgut_new(InstanceState); - strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); + strlcpy(instanceState->instance_name, dent->d_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, catalogState->backup_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_wal_subdir_path, @@ -1055,7 +1055,7 @@ get_backup_filelist(pgBackup *backup, bool strict) fp = fio_open_stream(backup_filelist_path, FIO_BACKUP_HOST); if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); /* enable stdio buffering for local file */ if (!fio_is_remote(FIO_BACKUP_HOST)) @@ -1142,7 +1142,10 @@ get_backup_filelist(pgBackup *backup, bool strict) if (!file->is_datafile || file->is_cfs) file->size = file->uncompressed_size; - if (file->external_dir_num == 0 && S_ISREG(file->mode)) + if (file->external_dir_num == 0 && + (file->dbOid != 0 || + path_is_prefix_of_path("global", file->rel_path)) && + S_ISREG(file->mode)) { bool is_datafile = file->is_datafile; set_forkname(file); @@ -1623,7 +1626,8 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) } /* temp WAL segment */ else if (IsTempXLogFileName(file->name) || - IsTempCompressXLogFileName(file->name)) + IsTempCompressXLogFileName(file->name) || + IsTempPartialXLogFileName(file->name)) { elog(VERBOSE, "temp WAL file \"%s\"", file->name); @@ -2241,6 +2245,12 @@ do_set_backup(InstanceState *instanceState, time_t backup_id, if (set_backup_params->note) add_note(target_backup, set_backup_params->note); + /* Cleanup */ + if (backup_list) + { + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + } } /* @@ -2306,6 +2316,7 @@ add_note(pgBackup *target_backup, char *note) { char *note_string; + char *p; /* unset note */ if (pg_strcasecmp(note, "none") == 0) @@ -2322,8 +2333,8 @@ add_note(pgBackup *target_backup, char *note) * we save only "aaa" * Example: tests.set_backup.SetBackupTest.test_add_note_newlines */ - note_string = pgut_malloc(MAX_NOTE_SIZE); - sscanf(note, "%[^\n]", note_string); + p = strchr(note, '\n'); + note_string = pgut_strndup(note, p ? (p-note) : MAX_NOTE_SIZE); target_backup->note = note_string;
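The `add_note()` change replaces an unbounded `sscanf("%[^\n]")` into a `MAX_NOTE_SIZE` buffer, which could overflow for long notes, with an exact-size copy that stops at the first newline. A minimal sketch using the standard POSIX `strndup()` in place of `pgut_strndup()` (the `MAX_NOTE_SIZE` value here is illustrative):

```c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define MAX_NOTE_SIZE 1024      /* illustrative cap, not the real value */

/* duplicate up to the first newline, or up to the cap when there is none */
static char *
first_line(const char *note)
{
    const char *p = strchr(note, '\n');
    return strndup(note, p ? (size_t) (p - note) : MAX_NOTE_SIZE);
}

int
main(void)
{
    char *s = first_line("aaa\nbbb\nccc");
    printf("saved note: '%s'\n", s);   /* prints: saved note: 'aaa' */
    free(s);
    return 0;
}
```

`strndup()` allocates exactly what is needed, so the multi-line-note test case mentioned in the comment keeps only its first line without risking a write past the buffer.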
elog(INFO, "Adding note to backup %s: '%s'", @@ -2830,7 +2841,7 @@ parse_backup_mode(const char *value) return BACKUP_MODE_DIFF_DELTA; /* Backup mode is invalid, so leave with an error */ - elog(ERROR, "invalid backup-mode \"%s\"", value); + elog(ERROR, "Invalid backup-mode \"%s\"", value); return BACKUP_MODE_INVALID; } @@ -2865,7 +2876,7 @@ parse_compress_alg(const char *arg) len = strlen(arg); if (len == 0) - elog(ERROR, "compress algorithm is empty"); + elog(ERROR, "Compress algorithm is empty"); if (pg_strncasecmp("zlib", arg, len) == 0) return ZLIB_COMPRESS; @@ -2874,7 +2885,7 @@ parse_compress_alg(const char *arg) else if (pg_strncasecmp("none", arg, len) == 0) return NONE_COMPRESS; else - elog(ERROR, "invalid compress algorithm value \"%s\"", arg); + elog(ERROR, "Invalid compress algorithm value \"%s\"", arg); return NOT_DEFINED_COMPRESS; } diff --git a/src/catchup.c b/src/catchup.c index 79e3361a8..00752b194 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -171,10 +171,13 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (current.backup_mode != BACKUP_MODE_FULL) { - dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST, false); + ControlFileData dst_control; + get_control_file_or_back_file(dest_pgdata, FIO_LOCAL_HOST, &dst_control); + dest_id = dst_control.system_identifier; + if (source_conn_id != dest_id) - elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", - source_conn_id, dest_pgdata, dest_id); + elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu", + (long long)source_conn_id, dest_pgdata, (long long)dest_id); } } @@ -184,7 +187,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (source_node_info->ptrack_version_num == 0) elog(ERROR, "This PostgreSQL instance does not support ptrack"); else if (source_node_info->ptrack_version_num < 200) - elog(ERROR, "ptrack extension is too old.\n" + elog(ERROR, "Ptrack extension is too old.\n" "Upgrade ptrack to version >= 2"); else if (!source_node_info->is_ptrack_enabled) elog(ERROR, "Ptrack is disabled"); @@ -640,6 +643,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, ssize_t transfered_walfiles_bytes = 0; char pretty_source_bytes[20]; + char dest_pg_control_fullpath[MAXPGPATH]; + char dest_pg_control_bak_fullpath[MAXPGPATH]; + source_conn = catchup_init_state(&source_node_info, source_pgdata, dest_pgdata); catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata); @@ -935,6 +941,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, Assert(file->external_dir_num == 0); if (pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) redundant = true; + /* global/pg_control.pbk.bak is always kept, because it's needed to restart a failed incremental restore */ + if (pg_strcasecmp(file->rel_path, XLOG_CONTROL_BAK_FILE) == 0) + redundant = false; /* if file does not exists in destination list, then we can safely unlink it */ if (redundant) @@ -966,6 +975,28 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (dest_filelist) parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal); + join_path_components(dest_pg_control_fullpath, dest_pgdata, XLOG_CONTROL_FILE); + join_path_components(dest_pg_control_bak_fullpath, dest_pgdata, XLOG_CONTROL_BAK_FILE);
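The catchup changes that follow build a small state machine around pg_control: on an incremental run the destination's pg_control is renamed to XLOG_CONTROL_BAK_FILE before any file is transferred (unless a previous failed run already left the backup behind), and the backup copy is deleted only after the new pg_control has been copied over. A simplified, runnable sketch of that dance with plain POSIX calls standing in for the `fio_*` wrappers (file names are illustrative):

```c
#include <stdio.h>
#include <unistd.h>

/* set the control file aside; a no-op if a failed run already stashed it */
static int
stash_control(const char *control, const char *bak)
{
    if (access(control, F_OK) != 0)
        return 0;               /* already stashed by a previous failed run */
    unlink(bak);                /* drop a stale backup, if any */
    return rename(control, bak);
}

/* copy succeeded: the stashed control file is no longer needed */
static void
drop_stash(const char *bak)
{
    if (access(bak, F_OK) == 0)
        unlink(bak);
}

int
main(void)
{
    const char *ctl = "demo_pg_control";
    const char *bak = "demo_pg_control.pbk.bak";
    FILE *f = fopen(ctl, "w");  /* stand-in for an existing pg_control */
    if (f) fclose(f);

    if (stash_control(ctl, bak) != 0)
        perror("rename");
    /* ... data files would be transferred here ... */
    drop_stash(bak);
    return 0;
}
```

The invariant is that at every instant the destination holds either a valid pg_control or its .pbk.bak copy, which is exactly why the retention pass above refuses to treat the .bak file as redundant.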
+ /* + * Rename (if it exists) the destination control file before restoring: + * if it doesn't exist, it means we are already restoring into a previously failed + * pgdata, where XLOG_CONTROL_BAK_FILE already exists. + */ + if (current.backup_mode != BACKUP_MODE_FULL && !dry_run) + { + if (!fio_access(dest_pg_control_fullpath, F_OK, FIO_LOCAL_HOST)) + { + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true, 0, FIO_BACKUP_HOST); + + if(!fio_access(dest_pg_control_bak_fullpath, F_OK, FIO_LOCAL_HOST)) + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + fio_rename(dest_pg_control_fullpath, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } + } + /* run copy threads */ elog(INFO, "Start transferring data files"); time(&start_time); @@ -985,6 +1016,15 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, copy_pgcontrol_file(from_fullpath, FIO_DB_HOST, to_fullpath, FIO_LOCAL_HOST, source_pg_control_file); transfered_datafiles_bytes += source_pg_control_file->size; + + /* Now the backup control file can be deleted */ + if (current.backup_mode != BACKUP_MODE_FULL && !fio_access(dest_pg_control_bak_fullpath, F_OK, FIO_LOCAL_HOST)){ + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true, 0, FIO_BACKUP_HOST); + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } } if (!catchup_isok && !dry_run) diff --git a/src/checkdb.c b/src/checkdb.c index 1133a7b5d..2a7d4e9eb 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -145,7 +145,7 @@ check_files(void *arg) /* check for interrupt */ if (interrupted || thread_interrupted) - elog(ERROR, "interrupted during checkdb"); + elog(ERROR, "Interrupted during checkdb"); /* No need to check directories */ if (S_ISDIR(file->mode)) @@ -750,7 +750,7 @@ do_checkdb(bool need_amcheck, if (!skip_block_validation) { if (!pgdata) - elog(ERROR, "required parameter not specified: PGDATA " + elog(ERROR, "Required parameter not specified: PGDATA " "(-D, --pgdata)"); /* get node info */ diff --git a/src/configure.c b/src/configure.c index f7befb0c5..964548343 100644 --- a/src/configure.c +++ b/src/configure.c @@ -269,7 +269,7 @@ static const char *current_group = NULL; * Show configure options including default values. */ void -do_show_config(void) +do_show_config(bool show_base_units) { int i; @@ -277,10 +277,13 @@ do_show_config(void) for (i = 0; instance_options[i].type; i++) { + if (show_base_units && strchr("bBiIuU", instance_options[i].type) && instance_options[i].get_value == *option_get_value) + instance_options[i].flags |= GET_VAL_IN_BASE_UNITS; /* Set flag */ if (show_format == SHOW_PLAIN) show_configure_plain(&instance_options[i]); else show_configure_json(&instance_options[i]); + instance_options[i].flags &= ~(GET_VAL_IN_BASE_UNITS); /* Reset flag. It was reset in option_get_value().
Probably this reset isn't needed */ } show_configure_end(); @@ -801,6 +804,6 @@ show_configure_json(ConfigOption *opt) return; json_add_value(&show_buf, opt->lname, value, json_level, - true); + !(opt->flags & GET_VAL_IN_BASE_UNITS)); pfree(value); } diff --git a/src/data.c b/src/data.c index 490faf9b6..1a9616bae 100644 --- a/src/data.c +++ b/src/data.c @@ -142,7 +142,7 @@ page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version) phdr = (PageHeader) page; /* First check if page header is valid (it seems to be fast enough check) */ - if (!(PageGetPageSize(phdr) == BLCKSZ && + if (!(PageGetPageSize(page) == BLCKSZ && // PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION && (phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 && phdr->pd_lower >= SizeOfPageHeaderData && @@ -181,7 +181,7 @@ parse_page(Page page, XLogRecPtr *lsn) /* Get lsn from page header */ *lsn = PageXLogRecPtrGet(phdr->pd_lsn); - if (PageGetPageSize(phdr) == BLCKSZ && + if (PageGetPageSize(page) == BLCKSZ && // PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION && (phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 && phdr->pd_lower >= SizeOfPageHeaderData && @@ -203,10 +203,10 @@ get_header_errormsg(Page page, char **errormsg) PageHeader phdr = (PageHeader) page; *errormsg = pgut_malloc(ERRMSG_MAX_LEN); - if (PageGetPageSize(phdr) != BLCKSZ) + if (PageGetPageSize(page) != BLCKSZ) snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " "page size %lu is not equal to block size %u", - PageGetPageSize(phdr), BLCKSZ); + PageGetPageSize(page), BLCKSZ); else if (phdr->pd_lower < SizeOfPageHeaderData) snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " @@ -815,6 +815,8 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) { file->write_size = BYTES_INVALID; + /* get full size from previous backup for unchanged file */ + file->uncompressed_size = prev_file->uncompressed_size; return; /* ...skip copying file. 
*/ } } @@ -2488,7 +2490,10 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, file->rel_path, file->hdr_off, z_len, file->hdr_crc); if (fwrite(zheaders, 1, z_len, hdr_map->fp) != z_len) + { + pthread_mutex_unlock(&(hdr_map->mutex)); elog(ERROR, "Cannot write to file \"%s\": %s", map_path, strerror(errno)); + } file->hdr_size = z_len; /* save the length of compressed headers */ hdr_map->offset += z_len; /* update current offset in map */ diff --git a/src/delete.c b/src/delete.c index 3f299d78b..f48ecc95f 100644 --- a/src/delete.c +++ b/src/delete.c @@ -158,7 +158,13 @@ void do_retention(InstanceState *instanceState, bool no_validate, bool no_sync) /* Retention is disabled but we still can cleanup wal */ elog(WARNING, "Retention policy is not set"); if (!delete_wal) + { + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + parray_free(to_keep_list); + parray_free(to_purge_list); return; + } } else /* At least one retention policy is active */ @@ -552,7 +558,12 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, /* Try to remove merged incremental backup from both keep and purge lists */ parray_rm(to_purge_list, tmp_backup, pgBackupCompareId); - parray_set(to_keep_list, i, NULL); + for (i = 0; i < parray_num(to_keep_list); i++) + if (parray_get(to_keep_list, i) == tmp_backup) + { + parray_set(to_keep_list, i, NULL); + break; + } } if (!no_validate) pgBackupValidate(full_backup, NULL); @@ -1042,6 +1053,8 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, if (parray_num(backup_list) == 0) { elog(WARNING, "Instance '%s' has no backups", instanceState->instance_name); + parray_free(delete_list); + parray_free(backup_list); return; } diff --git a/src/dir.c b/src/dir.c index 0a55c0f67..4b1bc2816 100644 --- a/src/dir.c +++ b/src/dir.c @@ -151,11 +151,11 @@ dir_create_dir(const char *dir, mode_t mode, bool strict) { char parent[MAXPGPATH]; - strncpy(parent, dir, MAXPGPATH); + strlcpy(parent, dir, MAXPGPATH); get_parent_directory(parent); /* Create parent first */ - if (access(parent, F_OK) == -1) + if (strlen(parent) > 0 && access(parent, F_OK) == -1) dir_create_dir(parent, mode, false); /* Create directory */ @@ -182,7 +182,7 @@ pgFileNew(const char *path, const char *rel_path, bool follow_symlink, /* file not found is not an error case */ if (errno == ENOENT) return NULL; - elog(ERROR, "cannot stat file \"%s\": %s", path, + elog(ERROR, "Cannot stat file \"%s\": %s", path, strerror(errno)); } @@ -787,14 +787,14 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, for (arg_ptr = arg; *arg_ptr; arg_ptr++) { if (dst_ptr - dst >= MAXPGPATH) - elog(ERROR, "directory name too long"); + elog(ERROR, "Directory name too long"); if (*arg_ptr == '\\' && *(arg_ptr + 1) == '=') ; /* skip backslash escaping = */ else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\')) { if (*cell->new_dir) - elog(ERROR, "multiple \"=\" signs in %s mapping\n", type); + elog(ERROR, "Multiple \"=\" signs in %s mapping\n", type); else dst = dst_ptr = cell->new_dir; } @@ -803,7 +803,7 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, } if (!*cell->old_dir || !*cell->new_dir) - elog(ERROR, "invalid %s mapping format \"%s\", " + elog(ERROR, "Invalid %s mapping format \"%s\", " "must be \"OLDDIR=NEWDIR\"", type, arg); canonicalize_path(cell->old_dir); canonicalize_path(cell->new_dir); @@ -815,11 +815,11 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, 
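The one-line `write_page_headers()` fix at the top of this hunk is about error paths under a lock: `elog(ERROR)` does not return, so failing inside the `hdr_map` critical section without unlocking would leave every other backup thread stuck on the mutex. A stand-alone illustration of the pattern, where `fatal_error()` stands in for `elog(ERROR)`:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;

static void
fatal_error(const char *msg)
{
    fprintf(stderr, "ERROR: %s\n", msg);
    exit(1);                    /* does not return, like elog(ERROR) */
}

static void
append_to_map(const void *buf, size_t len, FILE *fp)
{
    pthread_mutex_lock(&map_mutex);
    if (fwrite(buf, 1, len, fp) != len)
    {
        /* release the lock first: other threads must not inherit a
         * permanently held mutex from the failing thread */
        pthread_mutex_unlock(&map_mutex);
        fatal_error("cannot write to header map");
    }
    pthread_mutex_unlock(&map_mutex);
}

int
main(void)
{
    FILE *fp = tmpfile();
    if (fp == NULL)
        fatal_error("tmpfile");
    append_to_map("hdr", 3, fp);
    puts("header written");
    fclose(fp);
    return 0;
}
```

In single-process code an exit would release the lock anyway; here the other worker threads keep running for a while, which is why the explicit unlock matters.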
* consistent with the new_dir check. */ if (!is_absolute_path(cell->old_dir)) - elog(ERROR, "old directory is not an absolute path in %s mapping: %s\n", + elog(ERROR, "Old directory is not an absolute path in %s mapping: %s\n", type, cell->old_dir); if (!is_absolute_path(cell->new_dir)) - elog(ERROR, "new directory is not an absolute path in %s mapping: %s\n", + elog(ERROR, "New directory is not an absolute path in %s mapping: %s\n", type, cell->new_dir); if (list->tail) @@ -964,7 +964,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba if (links) { /* get parent dir of rel_path */ - strncpy(parent_dir, dir->rel_path, MAXPGPATH); + strlcpy(parent_dir, dir->rel_path, MAXPGPATH); get_parent_directory(parent_dir); /* check if directory is actually link to tablespace */ @@ -1046,7 +1046,7 @@ read_tablespace_map(parray *links, const char *backup_dir) int i = 0; if (sscanf(buf, "%s %n", link_name, &n) != 1) - elog(ERROR, "invalid format found in \"%s\"", map_path); + elog(ERROR, "Invalid format found in \"%s\"", map_path); path = buf + n; @@ -1175,7 +1175,6 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg { pgFile *link = (pgFile *) parray_get(links, i); const char *linked_path = link->linked; - TablespaceListCell *cell; bool remapped = false; for (cell = tablespace_dirs.head; cell; cell = cell->next) @@ -1438,7 +1437,7 @@ get_control_value_str(const char *str, const char *name, { /* verify if value_str not exceeds value_str_size limits */ if (value_str - value_str_start >= value_str_size - 1) { - elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s", + elog(ERROR, "Field \"%s\" is out of range in the line %s of the file %s", name, str, DATABASE_FILE_LIST); } *value_str = *buf; @@ -1463,7 +1462,7 @@ get_control_value_str(const char *str, const char *name, /* Did not find target field */ if (is_mandatory) - elog(ERROR, "field \"%s\" is not found in the line %s of the file %s", + elog(ERROR, "Field \"%s\" is not found in the line %s of the file %s", name, str, DATABASE_FILE_LIST); return false; } @@ -1490,7 +1489,7 @@ dir_is_empty(const char *path, fio_location location) /* Directory in path doesn't exist */ if (errno == ENOENT) return true; - elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno)); + elog(ERROR, "Cannot open directory \"%s\": %s", path, strerror(errno)); } errno = 0; @@ -1506,7 +1505,7 @@ dir_is_empty(const char *path, fio_location location) return false; } if (errno) - elog(ERROR, "cannot read directory \"%s\": %s", path, strerror(errno)); + elog(ERROR, "Cannot read directory \"%s\": %s", path, strerror(errno)); fio_closedir(dir); @@ -1837,7 +1836,19 @@ set_forkname(pgFile *file) return false; } - /* CFS "fork name" */ + /* CFS family fork names */ + if (file->forkName == none && + is_forkname(file->name, &i, ".cfm.bck")) + { + /* /^\d+(\.\d+)?\.cfm\.bck$/ */ + file->forkName = cfm_bck; + } + if (file->forkName == none && + is_forkname(file->name, &i, ".bck")) + { + /* /^\d+(\.\d+)?\.bck$/ */ + file->forkName = cfs_bck; + } if (file->forkName == none && is_forkname(file->name, &i, ".cfm")) { @@ -1856,4 +1867,4 @@ set_forkname(pgFile *file) file->segno = segno; file->is_datafile = file->forkName == none; return true; -} +} \ No newline at end of file diff --git a/src/fetch.c b/src/fetch.c index bef30dac6..5401d815e 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -92,7 +92,7 @@ fetchFile(PGconn *conn, const char *filename, size_t *filesize) /* sanity check the result set */ if 
(PQntuples(res) != 1 || PQgetisnull(res, 0, 0)) - elog(ERROR, "unexpected result set while fetching remote file \"%s\"", + elog(ERROR, "Unexpected result set while fetching remote file \"%s\"", filename); /* Read result to local variables */ diff --git a/src/help.c b/src/help.c index 116a0711c..e18706a13 100644 --- a/src/help.c +++ b/src/help.c @@ -87,9 +87,9 @@ help_pg_probackup(void) printf(_("\n %s version\n"), PROGRAM_NAME); - printf(_("\n %s init -B backup-path\n"), PROGRAM_NAME); + printf(_("\n %s init -B backup-dir\n"), PROGRAM_NAME); - printf(_("\n %s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s set-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path]\n")); printf(_(" [--external-dirs=external-directories-paths]\n")); printf(_(" [--log-level-console=log-level-console]\n")); @@ -114,16 +114,17 @@ help_pg_probackup(void) printf(_(" [--archive-port=port] [--archive-user=username]\n")); printf(_(" [--help]\n")); - printf(_("\n %s set-backup -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s set-backup -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [--ttl=interval] [--expire-time=timestamp]\n")); printf(_(" [--note=text]\n")); printf(_(" [--help]\n")); - printf(_("\n %s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s show-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [--format=format]\n")); + printf(_(" [--no-scale-units]\n")); printf(_(" [--help]\n")); - printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s backup -B backup-dir -b backup-mode --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-C]\n")); printf(_(" [--stream [-S slot-name] [--temp-slot]]\n")); printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n")); @@ -156,7 +157,7 @@ help_pg_probackup(void) printf(_(" [--help]\n")); - printf(_("\n %s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s restore -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); @@ -175,6 +176,7 @@ help_pg_probackup(void) printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include | --db-exclude]\n")); + printf(_(" [--destroy-all-other-dbs]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); @@ -182,7 +184,7 @@ help_pg_probackup(void) printf(_(" [--archive-port=port] [--archive-user=username]\n")); printf(_(" [--help]\n")); - printf(_("\n %s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n %s validate -B backup-dir [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-i backup-id] [--progress] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); @@ -191,18 +193,18 @@ help_pg_probackup(void) printf(_(" [--skip-block-validation]\n")); printf(_(" [--help]\n")); - printf(_("\n %s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME); + 
printf(_("\n %s checkdb [-B backup-dir] [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [--progress] [-j num-threads]\n")); printf(_(" [--amcheck] [--skip-block-validation]\n")); printf(_(" [--heapallindexed] [--checkunique]\n")); printf(_(" [--help]\n")); - printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME); - printf(_(" [--instance=instance_name [-i backup-id]]\n")); + printf(_("\n %s show -B backup-dir\n"), PROGRAM_NAME); + printf(_(" [--instance=instance-name [-i backup-id]]\n")); printf(_(" [--format=format] [--archive]\n")); printf(_(" [--no-color] [--help]\n")); - printf(_("\n %s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s delete -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-j num-threads] [--progress]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); printf(_(" [--retention-window=retention-window]\n")); @@ -212,24 +214,24 @@ help_pg_probackup(void) printf(_(" [--dry-run] [--no-validate] [--no-sync]\n")); printf(_(" [--help]\n")); - printf(_("\n %s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s merge -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [--progress] [-j num-threads]\n")); printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--help]\n")); - printf(_("\n %s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n %s add-instance -B backup-dir -D pgdata-path\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [--external-dirs=external-directories-paths]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); printf(_(" [--help]\n")); - printf(_("\n %s del-instance -B backup-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n %s del-instance -B backup-dir\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [--help]\n")); - printf(_("\n %s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s archive-push -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -243,7 +245,7 @@ help_pg_probackup(void) printf(_(" [--ssh-options]\n")); printf(_(" [--help]\n")); - printf(_("\n %s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s archive-get -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-path=wal-file-path\n")); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -293,14 +295,14 @@ help_internal(void) static void help_init(void) { - printf(_("\n%s init -B backup-path\n\n"), PROGRAM_NAME); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n\n")); + printf(_("\n%s init -B backup-dir\n\n"), PROGRAM_NAME); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n\n")); } static void help_backup(void) { - printf(_("\n%s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s backup -B backup-dir -b backup-mode --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-C]\n")); printf(_(" [--stream [-S slot-name] 
[--temp-slot]]\n")); printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n")); @@ -331,9 +333,9 @@ help_backup(void) printf(_(" [--ssh-options]\n")); printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" -C, --smooth-checkpoint do smooth checkpoint before backup\n")); printf(_(" --stream stream the transaction log and include it in the backup\n")); @@ -440,7 +442,7 @@ help_backup(void) static void help_restore(void) { - printf(_("\n%s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s restore -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads]\n")); printf(_(" [--progress] [--force] [--no-sync]\n")); printf(_(" [--no-validate] [--skip-block-validation]\n")); @@ -450,6 +452,7 @@ help_restore(void) printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include dbname | --db-exclude dbname]\n")); + printf(_(" [--destroy-all-other-dbs]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); printf(_(" [--recovery-target-timeline=timeline]\n")); @@ -466,8 +469,8 @@ help_restore(void) printf(_(" [--archive-host=hostname] [--archive-port=port]\n")); printf(_(" [--archive-user=username]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" -i, --backup-id=backup-id backup to restore\n")); @@ -497,6 +500,9 @@ help_restore(void) printf(_("\n Partial restore options:\n")); printf(_(" --db-include dbname restore only specified databases\n")); printf(_(" --db-exclude dbname do not restore specified databases\n")); + printf(_(" --destroy-all-other-dbs\n")); + printf(_(" allows a partial restore, which is prohibited by default\n")); + printf(_(" because it might remove all other databases.\n")); printf(_("\n Recovery options:\n")); printf(_(" --recovery-target-time=time time stamp up to which recovery will proceed\n")); @@ -571,7 +577,7 @@ help_restore(void) static void help_validate(void) { - printf(_("\n%s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n%s validate -B backup-dir [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-i backup-id] [--progress] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); @@ -579,8 +585,8 @@ help_validate(void) printf(_(" [--recovery-target-name=target-name]\n")); printf(_(" [--skip-block-validation]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); -
printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to validate\n")); printf(_(" --progress show progress\n")); @@ -628,13 +634,13 @@ help_validate(void) static void help_checkdb(void) { - printf(_("\n%s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n%s checkdb [-B backup-dir] [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-j num-threads] [--progress]\n")); printf(_(" [--amcheck] [--skip-block-validation]\n")); printf(_(" [--heapallindexed] [--checkunique]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" --progress show progress\n")); @@ -689,12 +695,12 @@ help_checkdb(void) static void help_show(void) { - printf(_("\n%s show -B backup-path\n"), PROGRAM_NAME); - printf(_(" [--instance=instance_name [-i backup-id]]\n")); + printf(_("\n%s show -B backup-dir\n"), PROGRAM_NAME); + printf(_(" [--instance=instance-name [-i backup-id]]\n")); printf(_(" [--format=format] [--archive]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name show info about specific instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name show info about specific instance\n")); printf(_(" -i, --backup-id=backup-id show info about specific backups\n")); printf(_(" --archive show WAL archive information\n")); printf(_(" --format=format show format=PLAIN|JSON\n")); @@ -704,7 +710,7 @@ help_show(void) static void help_delete(void) { - printf(_("\n%s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s delete -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-i backup-id | --delete-expired | --merge-expired] [--delete-wal]\n")); printf(_(" [-j num-threads] [--progress]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); @@ -712,8 +718,8 @@ help_delete(void) printf(_(" [--wal-depth=wal-depth]\n")); printf(_(" [--no-validate] [--no-sync]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to delete\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); printf(_(" --progress show progress\n")); @@ -767,7 +773,7 @@ help_delete(void) static void help_merge(void) { - printf(_("\n%s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s merge -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [-j num-threads] [--progress]\n")); printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); @@ -780,8 +786,8 @@ help_merge(void) printf(_(" [--log-rotation-size=log-rotation-size]\n")); printf(_(" 
[--log-rotation-age=log-rotation-age]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to merge\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); @@ -821,7 +827,7 @@ help_merge(void) static void help_set_backup(void) { - printf(_("\n%s set-backup -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s set-backup -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id\n")); printf(_(" [--ttl=interval] [--expire-time=time] [--note=text]\n\n")); @@ -837,7 +843,7 @@ help_set_backup(void) static void help_set_config(void) { - printf(_("\n%s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s set-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path]\n")); printf(_(" [-E external-directories-paths]\n")); printf(_(" [--restore-command=cmdline]\n")); @@ -860,8 +866,8 @@ help_set_config(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" -E --external-dirs=external-directories-paths\n")); printf(_(" backup some directories not from pgdata \n")); @@ -943,27 +949,28 @@ help_set_config(void) static void help_show_config(void) { - printf(_("\n%s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s show-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [--format=format]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); - printf(_(" --format=format show format=PLAIN|JSON\n\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); + printf(_(" --format=format show format=PLAIN|JSON\n")); + printf(_(" --no-scale-units show memory and time values in default units\n\n")); } static void help_add_instance(void) { - printf(_("\n%s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n%s add-instance -B backup-dir -D pgdata-path\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [-E external-directory-path]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); - printf(_(" --instance=instance_name name of the new instance\n")); + printf(_(" --instance=instance-name name of the new instance\n")); printf(_(" -E 
--external-dirs=external-directories-paths\n")); printf(_(" backup some directories not from pgdata \n")); @@ -978,21 +985,45 @@ help_add_instance(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); + + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); } static void help_del_instance(void) { - printf(_("\n%s del-instance -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s del-instance -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance to delete\n\n")); } static void help_archive_push(void) { - printf(_("\n%s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s archive-push -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -1005,8 +1036,8 @@ help_archive_push(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" name of the file to copy into WAL archive\n")); printf(_(" --wal-file-path=wal-file-path\n")); @@ -1025,6 +1056,30 @@ help_archive_push(void) printf(_("
--compress-level=compress-level\n")); printf(_(" level of compression [0-9] (default: 1)\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); @@ -1040,7 +1095,7 @@ help_archive_push(void) static void help_archive_get(void) { - printf(_("\n%s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s archive-get -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -1049,8 +1104,8 @@ help_archive_get(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" --wal-file-path=wal-file-path\n")); printf(_(" relative destination path name of the WAL file on the server\n")); printf(_(" --wal-file-name=wal-file-name\n")); @@ -1060,6 +1115,30 @@ help_archive_get(void) printf(_(" --prefetch-dir=path location of the store area for prefetched WAL files\n")); printf(_(" --no-validate-wal skip validation of prefetched WAL file before using it\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" 
--log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); @@ -1126,6 +1205,30 @@ help_catchup(void) printf(_(" -w, --no-password never prompt for password\n")); printf(_(" -W, --password force password prompt\n\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); diff --git a/src/init.c b/src/init.c index 8773016b5..837e2bad0 100644 --- a/src/init.c +++ b/src/init.c @@ -24,11 +24,11 @@ do_init(CatalogState *catalogState) results = pg_check_dir(catalogState->catalog_path); if (results == 4) /* exists and not empty*/ - elog(ERROR, "backup catalog already exist and it's not empty"); + elog(ERROR, "The backup catalog already exists and is not empty"); else if (results == -1) /*trouble accessing directory*/ { int errno_tmp = errno; - 
elog(ERROR, "cannot open backup catalog directory \"%s\": %s", + elog(ERROR, "Cannot open backup catalog directory \"%s\": %s", catalogState->catalog_path, strerror(errno_tmp)); } @@ -41,7 +41,7 @@ do_init(CatalogState *catalogState) /* create backup catalog wal directory */ dir_create_dir(catalogState->wal_subdir_path, DIR_PERMISSION, false); - elog(INFO, "Backup catalog '%s' successfully inited", catalogState->catalog_path); + elog(INFO, "Backup catalog '%s' successfully initialized", catalogState->catalog_path); return 0; } @@ -53,8 +53,9 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) /* PGDATA is always required */ if (instance->pgdata == NULL) - elog(ERROR, "Required parameter not specified: PGDATA " - "(-D, --pgdata)"); + elog(ERROR, "No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)"); /* Read system_identifier from PGDATA */ instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST, false); @@ -121,6 +122,6 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) /* pgdata was set through command line */ do_set_config(instanceState, true); - elog(INFO, "Instance '%s' successfully inited", instanceState->instance_name); + elog(INFO, "Instance '%s' successfully initialized", instanceState->instance_name); return 0; } diff --git a/src/merge.c b/src/merge.c index 0017c9e9c..e8f926795 100644 --- a/src/merge.c +++ b/src/merge.c @@ -79,10 +79,10 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool int i; if (backup_id == INVALID_BACKUP_ID) - elog(ERROR, "required parameter is not specified: --backup-id"); + elog(ERROR, "Required parameter is not specified: --backup-id"); if (instanceState == NULL) - elog(ERROR, "required parameter is not specified: --instance"); + elog(ERROR, "Required parameter is not specified: --instance"); elog(INFO, "Merge started"); @@ -337,7 +337,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool else { if ((full_backup->status == BACKUP_STATUS_MERGED || - full_backup->status == BACKUP_STATUS_MERGED) && + full_backup->status == BACKUP_STATUS_MERGING) && dest_backup->start_time != full_backup->merge_dest_backup) { elog(ERROR, "Full backup %s has unfinished merge with backup %s", @@ -887,7 +887,7 @@ merge_chain(InstanceState *instanceState, pfree(threads); } - if (result_filelist && parray_num(result_filelist) > 0) + if (result_filelist) { parray_walk(result_filelist, pgFileFree); parray_free(result_filelist); @@ -1067,7 +1067,7 @@ merge_files(void *arg) tmp_file->hdr_crc = file->hdr_crc; } else - tmp_file->uncompressed_size = tmp_file->uncompressed_size; + tmp_file->uncompressed_size = file->uncompressed_size; /* Copy header metadata from old map into a new one */ tmp_file->n_headers = file->n_headers; diff --git a/src/parsexlog.c b/src/parsexlog.c index 7c4b5b349..7df169fbf 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -1588,9 +1588,14 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) reader_data = (XLogReaderData *) xlogreader->private_data; reader_data->need_switch = false; +start: /* Critical section */ pthread_lock(&wal_segment_mutex); Assert(segno_next); + + if (reader_data->xlogsegno > segno_next) + segno_next = reader_data->xlogsegno; + reader_data->xlogsegno = segno_next; segnum_read++; segno_next++; @@ -1604,6 +1609,7 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) 
GetXLogRecPtr(reader_data->xlogsegno, 0, wal_seg_size, arg->startpoint); /* We need to close previously opened file if it wasn't closed earlier */ CleanupXLogPageRead(xlogreader); + xlogreader->currRecPtr = InvalidXLogRecPtr; /* Skip over the page header and contrecord if any */ found = XLogFindNextRecord(xlogreader, arg->startpoint); @@ -1613,6 +1619,8 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) */ if (XLogRecPtrIsInvalid(found)) { + if (reader_data->need_switch) + goto start; /* * Check if we need to stop reading. We stop if other thread found a * target segment. diff --git a/src/pg_probackup.c b/src/pg_probackup.c index 849685278..fa67ddff5 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -98,7 +98,7 @@ static char *target_time = NULL; static char *target_xid = NULL; static char *target_lsn = NULL; static char *target_inclusive = NULL; -static TimeLineID target_tli; +static char *target_tli_string; /* timeline number, "current" or "latest"*/ static char *target_stop; static bool target_immediate; static char *target_name = NULL; @@ -124,6 +124,7 @@ static parray *datname_include_list = NULL; static parray *exclude_absolute_paths_list = NULL; static parray *exclude_relative_paths_list = NULL; static char* gl_waldir_path = NULL; +static bool allow_partial_incremental = false; /* checkdb options */ bool need_amcheck = false; @@ -163,6 +164,7 @@ bool no_validate_wal = false; /* show options */ ShowFormat show_format = SHOW_PLAIN; bool show_archive = false; +static bool show_base_units = false; /* set-backup options */ int64 ttl = -1; @@ -226,7 +228,7 @@ static ConfigOption cmd_options[] = { 's', 137, "recovery-target-xid", &target_xid, SOURCE_CMD_STRICT }, { 's', 144, "recovery-target-lsn", &target_lsn, SOURCE_CMD_STRICT }, { 's', 138, "recovery-target-inclusive", &target_inclusive, SOURCE_CMD_STRICT }, - { 'u', 139, "recovery-target-timeline", &target_tli, SOURCE_CMD_STRICT }, + { 's', 139, "recovery-target-timeline", &target_tli_string, SOURCE_CMD_STRICT }, { 's', 157, "recovery-target", &target_stop, SOURCE_CMD_STRICT }, { 'f', 'T', "tablespace-mapping", opt_tablespace_map, SOURCE_CMD_STRICT }, { 'f', 155, "external-mapping", opt_externaldir_map, SOURCE_CMD_STRICT }, @@ -242,6 +244,7 @@ static ConfigOption cmd_options[] = { 's', 'S', "primary-slot-name",&replication_slot, SOURCE_CMD_STRICT }, { 'f', 'I', "incremental-mode", opt_incr_restore_mode, SOURCE_CMD_STRICT }, { 's', 'X', "waldir", &gl_waldir_path, SOURCE_CMD_STRICT }, + { 'b', 242, "destroy-all-other-dbs", &allow_partial_incremental, SOURCE_CMD_STRICT }, /* checkdb options */ { 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT }, { 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT }, @@ -273,6 +276,8 @@ static ConfigOption cmd_options[] = /* show options */ { 'f', 165, "format", opt_show_format, SOURCE_CMD_STRICT }, { 'b', 166, "archive", &show_archive, SOURCE_CMD_STRICT }, + /* show-config options */ + { 'b', 167, "no-scale-units", &show_base_units,SOURCE_CMD_STRICT }, /* set-backup options */ { 'I', 170, "ttl", &ttl, SOURCE_CMD_STRICT, SOURCE_DEFAULT, 0, OPTION_UNIT_S, option_get_value}, { 's', 171, "expire-time", &expire_time_string, SOURCE_CMD_STRICT }, @@ -283,7 +288,7 @@ static ConfigOption cmd_options[] = { 's', 136, "time", &target_time, SOURCE_CMD_STRICT }, { 's', 137, "xid", &target_xid, SOURCE_CMD_STRICT }, { 's', 138, "inclusive", &target_inclusive, SOURCE_CMD_STRICT }, - { 'u', 139, "timeline", &target_tli, SOURCE_CMD_STRICT }, + { 's', 139, "timeline", 
&target_tli_string, SOURCE_CMD_STRICT }, { 's', 144, "lsn", &target_lsn, SOURCE_CMD_STRICT }, { 'b', 140, "immediate", &target_immediate, SOURCE_CMD_STRICT }, @@ -491,7 +496,10 @@ main(int argc, char *argv[]) backup_subcmd != HELP_CMD && backup_subcmd != VERSION_CMD && backup_subcmd != CATCHUP_CMD) - elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)"); + elog(ERROR, + "No backup catalog path specified.\n" + "Please specify it either using environment variable BACKUP_PATH or\n" + "command line option --backup-path (-B)"); /* ===== catalogState (END) ======*/ @@ -505,7 +513,7 @@ main(int argc, char *argv[]) { if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD && backup_subcmd != VALIDATE_CMD && backup_subcmd != CHECKDB_CMD && backup_subcmd != CATCHUP_CMD) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); } else { @@ -618,7 +626,7 @@ main(int argc, char *argv[]) backup_path != NULL && instance_name == NULL && instance_config.pgdata == NULL) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); /* Check checkdb command options consistency */ if (backup_subcmd == CHECKDB_CMD && @@ -677,6 +685,7 @@ main(int argc, char *argv[]) if (instance_config.pgdata != NULL) canonicalize_path(instance_config.pgdata); if (instance_config.pgdata != NULL && + (backup_subcmd != ARCHIVE_GET_CMD && backup_subcmd != CATCHUP_CMD) && !is_absolute_path(instance_config.pgdata)) elog(ERROR, "-D, --pgdata must be an absolute path"); @@ -733,7 +742,7 @@ main(int argc, char *argv[]) */ recovery_target_options = parseRecoveryTargetOptions(target_time, target_xid, - target_inclusive, target_tli, target_lsn, + target_inclusive, target_tli_string, target_lsn, (target_stop != NULL) ? target_stop : (target_immediate) ? 
"immediate" : NULL, target_name, target_action); @@ -760,6 +769,7 @@ main(int argc, char *argv[]) restore_params->partial_restore_type = NONE; restore_params->primary_conninfo = primary_conninfo; restore_params->incremental_mode = incremental_mode; + restore_params->allow_partial_incremental = allow_partial_incremental; /* handle partial restore parameters */ if (datname_exclude_list && datname_include_list) @@ -830,14 +840,16 @@ main(int argc, char *argv[]) if (catchup_destination_pgdata == NULL) elog(ERROR, "You must specify \"--destination-pgdata\" option with the \"%s\" command", get_subcmd_name(backup_subcmd)); if (current.backup_mode == BACKUP_MODE_INVALID) - elog(ERROR, "Required parameter not specified: BACKUP_MODE (-b, --backup-mode)"); + elog(ERROR, "No backup mode specified.\n" + "Please specify it either using environment variable BACKUP_MODE or\n" + "command line option --backup-mode (-b)"); if (current.backup_mode != BACKUP_MODE_FULL && current.backup_mode != BACKUP_MODE_DIFF_PTRACK && current.backup_mode != BACKUP_MODE_DIFF_DELTA) elog(ERROR, "Only \"FULL\", \"PTRACK\" and \"DELTA\" modes are supported with the \"%s\" command", get_subcmd_name(backup_subcmd)); if (!stream_wal) elog(INFO, "--stream is required, forcing stream mode"); current.stream = stream_wal = true; if (instance_config.external_dir_str) - elog(ERROR, "external directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd)); + elog(ERROR, "External directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd)); // TODO check instance_config.conn_opt } @@ -984,8 +996,9 @@ main(int argc, char *argv[]) /* sanity */ if (current.backup_mode == BACKUP_MODE_INVALID) - elog(ERROR, "required parameter not specified: BACKUP_MODE " - "(-b, --backup-mode)"); + elog(ERROR, "No backup mode specified.\n" + "Please specify it either using environment variable BACKUP_MODE or\n" + "command line option --backup-mode (-b)"); return do_backup(instanceState, set_backup_params, no_validate, no_sync, backup_logs, start_time); @@ -1039,7 +1052,7 @@ main(int argc, char *argv[]) do_merge(instanceState, current.backup_id, no_validate, no_sync); break; case SHOW_CONFIG_CMD: - do_show_config(); + do_show_config(show_base_units); break; case SET_CONFIG_CMD: do_set_config(instanceState, false); @@ -1185,8 +1198,8 @@ opt_datname_exclude_list(ConfigOption *opt, const char *arg) void opt_datname_include_list(ConfigOption *opt, const char *arg) { - if (strcmp(arg, "tempate0") == 0 || - strcmp(arg, "tempate1") == 0) + if (strcmp(arg, "template0") == 0 || + strcmp(arg, "template1") == 0) elog(ERROR, "Databases 'template0' and 'template1' cannot be used for partial restore or validation"); opt_parser_add_to_parray_helper(&datname_include_list, arg); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index fa3bc4123..ae99e0605 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -91,6 +91,7 @@ extern const char *PROGRAM_EMAIL; #define DATABASE_MAP "database_map" #define HEADER_MAP "page_header_map" #define HEADER_MAP_TMP "page_header_map_tmp" +#define XLOG_CONTROL_BAK_FILE XLOG_CONTROL_FILE".pbk.bak" /* default replication slot names */ #define DEFAULT_TEMP_SLOT_NAME "pg_probackup_slot"; @@ -110,6 +111,8 @@ extern const char *PROGRAM_EMAIL; /* 64-bit xid support for PGPRO_EE */ #ifndef PGPRO_EE #define XID_FMT "%u" +#elif !defined(XID_FMT) +#define XID_FMT UINT64_FORMAT #endif #ifndef STDIN_FILENO @@ -179,6 +182,7 @@ typedef enum DestDirIncrCompatibility POSTMASTER_IS_RUNNING, SYSTEM_ID_MISMATCH, 
#ifndef STDIN_FILENO @@ -179,6 +182,7 @@ typedef enum DestDirIncrCompatibility POSTMASTER_IS_RUNNING, SYSTEM_ID_MISMATCH, BACKUP_LABEL_EXISTS, + PARTIAL_INCREMENTAL_FORBIDDEN, DEST_IS_NOT_OK, DEST_OK } DestDirIncrCompatibility; @@ -222,7 +226,9 @@ typedef enum ForkName fsm, cfm, init, - ptrack + ptrack, + cfs_bck, + cfm_bck } ForkName; #define INIT_FILE_CRC32(use_crc32c, crc) \ @@ -278,6 +284,7 @@ typedef struct pgFile int segno; /* Segment number for ptrack */ int n_blocks; /* number of blocks in the data file in data directory */ bool is_cfs; /* Flag to distinguish files compressed by CFS*/ + struct pgFile *cfs_chain; /* linked list of CFS segment's cfm, bck, cfm_bck related files */ int external_dir_num; /* Number of external directory. 0 if not external */ bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */ CompressAlg compress_alg; /* compression algorithm applied to the file */ @@ -292,6 +299,8 @@ typedef struct pgFile pg_off_t hdr_off; /* offset in header map */ int hdr_size; /* length of headers */ bool excluded; /* excluded via --exclude-path option */ + bool skip_cfs_nested; /* mark to skip in processing threads as nested to cfs_chain */ + bool remove_from_list; /* tmp flag to clean up files list from temp and unlogged tables */ } pgFile; typedef struct page_map_entry @@ -347,7 +356,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.11" +#define PROGRAM_VERSION "2.5.15" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20509 @@ -556,6 +565,7 @@ typedef struct pgRecoveryTarget const char *target_stop; const char *target_name; const char *target_action; + const char *target_tli_string; /* timeline number, "current" or "latest" from recovery_target_timeline option */ } pgRecoveryTarget; /* Options needed for restore and validate commands */ @@ -580,7 +590,8 @@ typedef struct pgRestoreParams /* options for partial restore */ PartialRestoreType partial_restore_type; parray *partial_db_list; - + bool allow_partial_incremental; + char* waldir; } pgRestoreParams; @@ -777,6 +788,11 @@ typedef struct StopBackupCallbackParams strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ strcmp((fname) + XLOG_FNAME_LEN, ".part") == 0) +#define IsTempPartialXLogFileName(fname) \ + (strlen(fname) == XLOG_FNAME_LEN + strlen(".partial.part") && \ + strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ + strcmp((fname) + XLOG_FNAME_LEN, ".partial.part") == 0) + #define IsTempCompressXLogFileName(fname) \ (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz.part") && \ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ @@ -879,7 +895,7 @@ extern bool satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt); extern pgRecoveryTarget *parseRecoveryTargetOptions( const char *target_time, const char *target_xid, - const char *target_inclusive, TimeLineID target_tli, const char* target_lsn, + const char *target_inclusive, const char *target_tli_string, const char* target_lsn, const char *target_stop, const char *target_name, const char *target_action); @@ -893,7 +909,9 @@ extern parray *get_backup_filelist(pgBackup *backup, bool strict); extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict); extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); extern DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, - 
IncrRestoreMode incremental_mode); + IncrRestoreMode incremental_mode, + parray *partial_db_list, + bool allow_partial_incremental); /* in remote.c */ extern void check_remote_agent_compatibility(int agent_version, @@ -921,7 +939,7 @@ extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instanc char *wal_file_name, int batch_size, bool validate_wal); /* in configure.c */ -extern void do_show_config(void); +extern void do_show_config(bool show_base_units); extern void do_set_config(InstanceState *instanceState, bool missing_ok); extern void init_config(InstanceConfig *config, const char *instance_name); extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); @@ -1192,6 +1210,8 @@ extern uint32 get_xlog_seg_size(const char *pgdata_path); extern void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo); extern void set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn); +extern void get_control_file_or_back_file(const char *pgdata_path, fio_location location, + ControlFileData *control); extern void copy_pgcontrol_file(const char *from_fullpath, fio_location from_location, const char *to_fullpath, fio_location to_location, pgFile *file); diff --git a/src/ptrack.c b/src/ptrack.c index ebcba1dd4..d27629e45 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -214,7 +214,7 @@ pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema, pfree(params[0]); if (PQnfields(res) != 2) - elog(ERROR, "cannot get ptrack pagemapset"); + elog(ERROR, "Cannot get ptrack pagemapset"); /* sanity ? */ diff --git a/src/restore.c b/src/restore.c index 6c0e1881f..f9310dcee 100644 --- a/src/restore.c +++ b/src/restore.c @@ -39,6 +39,8 @@ typedef struct int ret; } restore_files_arg; +static bool control_downloaded = false; +static ControlFileData instance_control; static void print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup, @@ -131,13 +133,14 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg XLogRecPtr shift_lsn = InvalidXLogRecPtr; if (instanceState == NULL) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); if (params->is_restore) { if (instance_config.pgdata == NULL) - elog(ERROR, - "required parameter not specified: PGDATA (-D, --pgdata)"); + elog(ERROR, "No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)"); /* Check if restore destination empty */ if (!dir_is_empty(instance_config.pgdata, FIO_DB_HOST)) @@ -149,6 +152,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (params->incremental_mode != INCR_NONE) { DestDirIncrCompatibility rc; + const char *message = NULL; bool ok_to_go = true; elog(INFO, "Running incremental restore into nonempty directory: \"%s\"", @@ -156,12 +160,15 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg rc = check_incremental_compatibility(instance_config.pgdata, instance_config.system_identifier, - params->incremental_mode); + params->incremental_mode, + params->partial_db_list, + params->allow_partial_incremental); if (rc == POSTMASTER_IS_RUNNING) { /* Even with force flag it is unwise to run * incremental restore over running instance */ + message = "Postmaster is running."; ok_to_go = false; } else if (rc == SYSTEM_ID_MISMATCH) @@ -173,7 +180,10 @@ 
do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (params->incremental_mode != INCR_NONE && params->force) cleanup_pgdata = true; else + { + message = "System ID mismatch."; ok_to_go = false; + } } else if (rc == BACKUP_LABEL_EXISTS) { @@ -186,7 +196,10 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg * to calculate switchpoint. */ if (params->incremental_mode == INCR_LSN) + { + message = "Backup label exists. Cannot use incremental restore in LSN mode."; ok_to_go = false; + } } else if (rc == DEST_IS_NOT_OK) { @@ -195,11 +208,16 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg * so we cannot be sure that postmaster is running or not. * It is better to just error out. */ + message = "We cannot be sure about the database state."; + ok_to_go = false; + } else if (rc == PARTIAL_INCREMENTAL_FORBIDDEN) + { + message = "Partial incremental restore into non-empty PGDATA is forbidden."; ok_to_go = false; } if (!ok_to_go) - elog(ERROR, "Incremental restore is not allowed"); + elog(ERROR, "Incremental restore is not allowed: %s", message); } else elog(ERROR, "Restore destination is not empty: \"%s\"", @@ -290,7 +308,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (!satisfy_timeline(timelines, current_backup->tli, current_backup->stop_lsn)) { if (target_backup_id != INVALID_BACKUP_ID) - elog(ERROR, "target backup %s does not satisfy target timeline", + elog(ERROR, "Target backup %s does not satisfy target timeline", base36enc(target_backup_id)); else /* Try to find another backup that satisfies target timeline */ @@ -485,6 +503,9 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (redo.checksum_version == 0) elog(ERROR, "Incremental restore in 'lsn' mode require " "data_checksums to be enabled in destination data directory"); + if (!control_downloaded) + get_control_file_or_back_file(instance_config.pgdata, FIO_DB_HOST, + &instance_control); timelines = read_timeline_history(instanceState->instance_wal_subdir_path, redo.tli, false); @@ -666,6 +687,11 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg backup_id_of(dest_backup), dest_backup->server_version); + if (instance_config.remote.host) + elog(INFO, "Restoring the database from backup %s on %s", backup_id_of(dest_backup), instance_config.remote.host); + else + elog(INFO, "Restoring the database from backup %s", backup_id_of(dest_backup)); + restore_chain(dest_backup, parent_chain, dbOid_exclude_list, params, instance_config.pgdata, no_sync, cleanup_pgdata, backup_has_tblspc); @@ -699,10 +725,13 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, bool backup_has_tblspc) { int i; - char timestamp[100]; parray *pgdata_files = NULL; parray *dest_files = NULL; parray *external_dirs = NULL; + pgFile *dest_pg_control_file = NULL; + char dest_pg_control_fullpath[MAXPGPATH]; + char dest_pg_control_bak_fullpath[MAXPGPATH]; + /* arrays with meta info for multi threaded backup */ pthread_t *threads; restore_files_arg *threads_args; @@ -718,9 +747,6 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, time_t start_time, end_time; /* Preparations for actual restoring */ - time2iso(timestamp, lengthof(timestamp), dest_backup->start_time, false); - elog(INFO, "Restoring the database from backup at %s", timestamp); - dest_files = get_backup_filelist(dest_backup, true); /* Lock backup chain and make sanity checks */ @@ -776,7 +802,7 @@ 
restore_chain(pgBackup *dest_backup, parray *parent_chain, use_bitmap = false; if (params->incremental_mode != INCR_NONE) - elog(ERROR, "incremental restore is not possible for backups older than 2.3.0 version"); + elog(ERROR, "Incremental restore is not possible for backups older than 2.3.0 version"); } /* There is no point in bitmap restore, when restoring a single FULL backup, @@ -906,6 +932,11 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) redundant = true; + /* global/pg_control.pbk.bak is always kept, because it's needed to restart a failed incremental restore */ + if (file->external_dir_num == 0 && + pg_strcasecmp(file->rel_path, XLOG_CONTROL_BAK_FILE) == 0) + redundant = false; + /* do not delete the useful internal directories */ if (S_ISDIR(file->mode) && !redundant) continue; @@ -958,6 +989,42 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, dest_bytes = dest_backup->pgdata_bytes; pretty_size(dest_bytes, pretty_dest_bytes, lengthof(pretty_dest_bytes)); + /* + * [Issue #313] + * find pg_control file (in already sorted earlier dest_files, see parray_qsort(backup->files...)) + * and exclude it from list for future special processing + */ + { + int control_file_elem_index; + pgFile search_key; + MemSet(&search_key, 0, sizeof(pgFile)); + /* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparison */ + search_key.rel_path = XLOG_CONTROL_FILE; + search_key.external_dir_num = 0; + control_file_elem_index = parray_bsearch_index(dest_files, &search_key, pgFileCompareRelPathWithExternal); + + if (control_file_elem_index < 0) + elog(ERROR, "File \"%s\" not found in backup %s", XLOG_CONTROL_FILE, base36enc(dest_backup->start_time)); + dest_pg_control_file = (pgFile *) parray_get(dest_files, control_file_elem_index); + parray_remove(dest_files, control_file_elem_index); + + join_path_components(dest_pg_control_fullpath, pgdata_path, XLOG_CONTROL_FILE); + join_path_components(dest_pg_control_bak_fullpath, pgdata_path, XLOG_CONTROL_BAK_FILE); + /* + * rename the destination control file (if it exists) before restoring; + * if it doesn't exist, we are already restoring into a previously failed + * pgdata, where XLOG_CONTROL_BAK_FILE exists + */ + if (params->incremental_mode != INCR_NONE) + { + if (fio_access(dest_pg_control_fullpath,F_OK,FIO_DB_HOST) == 0){ + if (fio_rename(dest_pg_control_fullpath, dest_pg_control_bak_fullpath, FIO_DB_HOST) < 0) + elog(WARNING, "Cannot rename file \"%s\" to \"%s\": %s", + dest_pg_control_fullpath, dest_pg_control_bak_fullpath, strerror(errno)); + } + } + } + elog(INFO, "Start restoring backup files. PGDATA size: %s", pretty_dest_bytes); time(&start_time); thread_interrupted = false; @@ -998,6 +1065,32 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, total_bytes += threads_args[i].restored_bytes; } + /* [Issue #313] copy pg_control at very end */ + if (restore_isok) + { + FILE *out = NULL;
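+ /* Note on ordering (Issue #313): the pre-restore code above renames the existing pg_control to pg_control.pbk.bak before any files are overwritten; all other files are then restored, and pg_control itself is written last (below), after which the .pbk.bak copy is deleted. If a restore fails partway, get_control_file_or_back_file() falls back to the .pbk.bak copy, so the incremental restore can be retried. */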
+ elog(progress ? INFO : LOG, "Progress: Restore file \"%s\"", + dest_pg_control_file->rel_path); + + out = fio_fopen(dest_pg_control_fullpath, PG_BINARY_R "+", FIO_DB_HOST); + + total_bytes += restore_non_data_file(parent_chain, + dest_backup, + dest_pg_control_file, + out, + dest_pg_control_fullpath, false); + fio_fclose(out); + /* Now backup control file can be deleted */ + if (params->incremental_mode != INCR_NONE) + { + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true,0, FIO_BACKUP_HOST); + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } + } + time(&end_time); pretty_time_interval(difftime(end_time, start_time), pretty_time, lengthof(pretty_time)); @@ -1082,6 +1175,8 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, parray_free(pgdata_files); } + if(dest_pg_control_file) pgFileFree(dest_pg_control_file); + for (i = parray_num(parent_chain) - 1; i >= 0; i--) { pgBackup *backup = (pgBackup *) parray_get(parent_chain, i); @@ -1316,8 +1411,10 @@ create_recovery_conf(InstanceState *instanceState, time_t backup_id, } /* restore-target='latest' support */ - target_latest = rt->target_stop != NULL && - strcmp(rt->target_stop, "latest") == 0; + target_latest = (rt->target_tli_string != NULL && + strcmp(rt->target_tli_string, "latest") == 0) || + (rt->target_stop != NULL && + strcmp(rt->target_stop, "latest") == 0); target_immediate = rt->target_stop != NULL && strcmp(rt->target_stop, "immediate") == 0; @@ -1343,6 +1440,13 @@ create_recovery_conf(InstanceState *instanceState, time_t backup_id, rt->xid_string || rt->lsn_string || rt->target_name || target_immediate || target_latest || restore_command_provided) params->recovery_settings_mode = PITR_REQUESTED; + /* + * The recovery-target-timeline option can be 'latest' for streaming backups. + * This operation requires a WAL archive for PITR. + */ + if (rt->target_tli && backup->stream && params->recovery_settings_mode != PITR_REQUESTED) + elog(WARNING, "The '--recovery-target-timeline' option applied for STREAM backup. " + "The timeline number will be ignored."); elog(LOG, "----------------------------------------"); @@ -1422,14 +1526,20 @@ print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup fio_fprintf(fp, "recovery_target_timeline = '%u'\n", rt->target_tli); else { + if (rt->target_tli_string) + fio_fprintf(fp, "recovery_target_timeline = '%s'\n", rt->target_tli_string); + else if (rt->target_stop && (strcmp(rt->target_stop, "latest") == 0)) + fio_fprintf(fp, "recovery_target_timeline = 'latest'\n"); #if PG_VERSION_NUM >= 120000 - + else + { /* * In PG12 default recovery target timeline was changed to 'latest', which * is extremely risky. Explicitly preserve old behavior of recovering to current * timneline for PG12. 
*/ fio_fprintf(fp, "recovery_target_timeline = 'current'\n"); + } #endif } @@ -1479,7 +1589,7 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu fp = fio_fopen(path, "w", FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if (fio_chmod(path, FILE_PERMISSION, FIO_DB_HOST) == -1) @@ -1499,7 +1609,7 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } #endif @@ -1538,7 +1648,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { /* file not found is not an error case */ if (errno != ENOENT) - elog(ERROR, "cannot stat file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot stat file \"%s\": %s", postgres_auto_path, strerror(errno)); st.st_size = 0; } @@ -1548,13 +1658,13 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { fp = fio_open_stream(postgres_auto_path, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", postgres_auto_path, strerror(errno)); } sprintf(postgres_auto_path_tmp, "%s.tmp", postgres_auto_path); fp_tmp = fio_fopen(postgres_auto_path_tmp, "w", FIO_DB_HOST); if (fp_tmp == NULL) - elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); while (fp && fgets(line, lengthof(line), fp)) { @@ -1612,7 +1722,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { fp = fio_fopen(postgres_auto_path, "a", FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot open file \"%s\": %s", postgres_auto_path, strerror(errno)); fio_fprintf(fp, "\n# recovery settings added by pg_probackup restore of backup %s at '%s'\n", @@ -1626,7 +1736,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot write file \"%s\": %s", postgres_auto_path, strerror(errno)); /* @@ -1646,12 +1756,12 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } @@ -1662,12 +1772,12 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } } @@ -1704,12 +1814,12 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict if (fd == NULL) { if (errno != ENOENT) - elog(ERROR, "could not open file \"%s\": %s", path, + elog(ERROR, "Could not open file \"%s\": %s", path, 
strerror(errno)); /* There is no history file for target timeline */ if (strict) - elog(ERROR, "recovery target timeline %u does not exist", + elog(ERROR, "Recovery target timeline %u does not exist", targetTLI); else return NULL; @@ -1743,12 +1853,12 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict { /* expect a numeric timeline ID as first field of line */ elog(ERROR, - "syntax error in history file: %s. Expected a numeric timeline ID.", + "Syntax error in history file: %s. Expected a numeric timeline ID.", fline); } if (nfields != 3) elog(ERROR, - "syntax error in history file: %s. Expected a transaction log switchpoint location.", + "Syntax error in history file: %s. Expected a transaction log switchpoint location.", fline); if (last_timeline && tli <= last_timeline->tli) @@ -1861,7 +1971,7 @@ pgRecoveryTarget * parseRecoveryTargetOptions(const char *target_time, const char *target_xid, const char *target_inclusive, - TimeLineID target_tli, + const char *target_tli_string, const char *target_lsn, const char *target_stop, const char *target_name, @@ -1934,7 +2044,20 @@ parseRecoveryTargetOptions(const char *target_time, target_inclusive); } - rt->target_tli = target_tli; + rt->target_tli_string = target_tli_string; + rt->target_tli = 0; + /* target_tli can contain timeline number, "current" or "latest" */ + if(target_tli_string && strcmp(target_tli_string, "current") != 0 && strcmp(target_tli_string, "latest") != 0) + { + errno = 0; + rt->target_tli = strtoul(target_tli_string, NULL, 10); + if (errno == EINVAL || errno == ERANGE || !rt->target_tli) + { + elog(ERROR, "Invalid value for '--recovery-target-timeline' option '%s'", + target_tli_string); + } + } + if (target_stop) { if ((strcmp(target_stop, "immediate") != 0) @@ -2141,7 +2264,9 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, */ DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, - IncrRestoreMode incremental_mode) + IncrRestoreMode incremental_mode, + parray *partial_db_list, + bool allow_partial_incremental) { uint64 system_id_pgdata; bool system_id_match = false; @@ -2184,7 +2309,10 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, */ elog(LOG, "Trying to read pg_control file in destination directory"); - system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false); + get_control_file_or_back_file(pgdata, FIO_DB_HOST, &instance_control); + control_downloaded = true; + + system_id_pgdata = instance_control.system_identifier; if (system_id_pgdata == instance_config.system_identifier) system_id_match = true; @@ -2225,6 +2353,8 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, if (backup_label_exists) return BACKUP_LABEL_EXISTS; + if (partial_db_list && !allow_partial_incremental) + return PARTIAL_INCREMENTAL_FORBIDDEN; /* some other error condition */ if (!success) return DEST_IS_NOT_OK; diff --git a/src/show.c b/src/show.c index 2e06582ed..810262df6 100644 --- a/src/show.c +++ b/src/show.c @@ -67,6 +67,7 @@ static void show_archive_plain(const char *instance_name, uint32 xlog_seg_size, parray *timelines_list, bool show_name); static void show_archive_json(const char *instance_name, uint32 xlog_seg_size, parray *tli_list); +static bool backup_has_tablespace_map(pgBackup *backup); static PQExpBufferData show_buf; static bool first_instance = true; @@ -137,7 +138,7 @@ do_show(CatalogState *catalogState, InstanceState *instanceState, show_instance_start();
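Stepping outside the patch for a moment: with the hunks above, --recovery-target-timeline accepts the keywords 'current' and 'latest' in addition to a numeric timeline ID. A minimal sketch of that validation rule, using a hypothetical helper name (not part of the patch):

```c
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint32_t TimeLineID;   /* stand-in for PostgreSQL's TimeLineID */

/*
 * Hypothetical helper mirroring the parseRecoveryTargetOptions() hunk:
 * the keywords leave *tli at 0 (resolved later from the backup catalog),
 * anything else must parse to a nonzero timeline number.
 */
static bool
validate_target_tli(const char *value, TimeLineID *tli)
{
    *tli = 0;
    if (strcmp(value, "current") == 0 || strcmp(value, "latest") == 0)
        return true;
    errno = 0;
    *tli = (TimeLineID) strtoul(value, NULL, 10);
    return !(errno == EINVAL || errno == ERANGE || *tli == 0);
}
```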
for (i = 0; i < parray_num(instances); i++) { - InstanceState *instanceState = parray_get(instances, i); + instanceState = parray_get(instances, i); if (interrupted) elog(ERROR, "Interrupted during show"); @@ -202,22 +203,22 @@ pretty_size(int64 size, char *buf, size_t len) return; } - if (Abs(size) < limit) + if (size < limit) snprintf(buf, len, "%dB", (int) size); else { size >>= 9; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dkB", (int) half_rounded(size)); else { size >>= 10; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dMB", (int) half_rounded(size)); else { size >>= 10; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dGB", (int) half_rounded(size)); else { @@ -452,7 +453,7 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) appendPQExpBuffer(buf, INT64_FORMAT, backup->uncompressed_bytes); } - if (backup->uncompressed_bytes >= 0) + if (backup->pgdata_bytes >= 0) { json_add_key(buf, "pgdata-bytes", json_level); appendPQExpBuffer(buf, INT64_FORMAT, backup->pgdata_bytes); @@ -479,6 +480,32 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) appendPQExpBuffer(buf, "%u", backup->content_crc); } + /* print tablespaces list */ + if (backup_has_tablespace_map(backup)) + { + parray *links = parray_new(); + + json_add_key(buf, "tablespace_map", json_level); + json_add(buf, JT_BEGIN_ARRAY, &json_level); + + read_tablespace_map(links, backup->root_dir); + parray_qsort(links, pgFileCompareLinked); + + for (size_t i = 0; i < parray_num(links); i++){ + pgFile *link = (pgFile *) parray_get(links, i); + if (i) + appendPQExpBufferChar(buf, ','); + json_add(buf, JT_BEGIN_OBJECT, &json_level); + json_add_value(buf, "oid", link->name, json_level, true); + json_add_value(buf, "path", link->linked, json_level, true); + json_add(buf, JT_END_OBJECT, &json_level); + } + /* End of tablespaces */ + json_add(buf, JT_END_ARRAY, &json_level); + parray_walk(links, pgFileFree); + parray_free(links); + } + json_add(buf, JT_END_OBJECT, &json_level); } @@ -514,12 +541,34 @@ show_backup(InstanceState *instanceState, time_t requested_backup_id) elog(INFO, "Requested backup \"%s\" is not found.", /* We do not need free base36enc's result, we exit anyway */ base36enc(requested_backup_id)); + parray_walk(backups, pgBackupFree); + parray_free(backups); /* This is not error */ return 0; } if (show_format == SHOW_PLAIN) + { pgBackupWriteControl(stdout, backup, false); + + /* print tablespaces list */ + if (backup_has_tablespace_map(backup)) + { + parray *links = parray_new(); + + fio_fprintf(stdout, "\ntablespace_map = '"); + + read_tablespace_map(links, backup->root_dir); + parray_qsort(links, pgFileCompareLinked); + + for (size_t i = 0; i < parray_num(links); i++){ + pgFile *link = (pgFile *) parray_get(links, i); + fio_fprintf(stdout, "%s %s%s", link->name, link->linked, (i < parray_num(links) - 1) ? 
"; " : "'\n"); + } + parray_walk(links, pgFileFree); + parray_free(links); + } + } else elog(ERROR, "Invalid show format %d", (int) show_format); @@ -1172,3 +1221,10 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, first_instance = false; } + +static bool backup_has_tablespace_map(pgBackup *backup) +{ + char map_path[MAXPGPATH]; + join_path_components(map_path, backup->database_dir, PG_TABLESPACE_MAP_FILE); + return fileExists(map_path, FIO_BACKUP_HOST); +} diff --git a/src/stream.c b/src/stream.c index f7bbeae5a..77453e997 100644 --- a/src/stream.c +++ b/src/stream.c @@ -307,7 +307,11 @@ StreamLog(void *arg) } #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 160000 + if (!ctl.walmethod->ops->finish(ctl.walmethod)) +#else if (!ctl.walmethod->finish()) +#endif { interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", @@ -529,7 +533,7 @@ get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backu /* link parent to child */ for (i = 0; i < parray_num(tli_list); i++) { - timelineInfo *tlinfo = (timelineInfo *) parray_get(tli_list, i); + tlinfo = (timelineInfo *) parray_get(tli_list, i); for (j = 0; j < parray_num(tli_list); j++) { @@ -546,7 +550,7 @@ get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backu /* add backups to each timeline info */ for (i = 0; i < parray_num(tli_list); i++) { - timelineInfo *tlinfo = parray_get(tli_list, i); + tlinfo = parray_get(tli_list, i); for (j = 0; j < parray_num(backup_list); j++) { pgBackup *backup = parray_get(backup_list, j); @@ -648,7 +652,7 @@ start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOption //TODO Add a comment about this calculation stream_stop_timeout = stream_stop_timeout + stream_stop_timeout * 0.1; - strncpy(stream_thread_arg.basedir, stream_dst_path, sizeof(stream_thread_arg.basedir)); + strlcpy(stream_thread_arg.basedir, stream_dst_path, sizeof(stream_thread_arg.basedir)); /* * Connect in replication mode to the server. diff --git a/src/util.c b/src/util.c index e371d2c6d..3c0a33453 100644 --- a/src/util.c +++ b/src/util.c @@ -74,7 +74,7 @@ checkControlFile(ControlFileData *ControlFile) if ((ControlFile->pg_control_version % 65536 == 0 || ControlFile->pg_control_version % 65536 > 10000) && ControlFile->pg_control_version / 65536 != 0) - elog(ERROR, "possible byte ordering mismatch\n" + elog(ERROR, "Possible byte ordering mismatch\n" "The byte ordering used to store the pg_control file might not match the one\n" "used by this program. In that case the results below would be incorrect, and\n" "the PostgreSQL installation would be incompatible with this data directory."); @@ -93,7 +93,7 @@ digestControlFile(ControlFileData *ControlFile, char *src, size_t size) #endif if (size != ControlFileSize) - elog(ERROR, "unexpected control file size %d, expected %d", + elog(ERROR, "Unexpected control file size %d, expected %d", (int) size, ControlFileSize); memcpy(ControlFile, src, sizeof(ControlFileData)); @@ -190,6 +190,26 @@ get_current_timeline_from_control(const char *pgdata_path, fio_location location return ControlFile.checkPointCopy.ThisTimeLineID; } +void +get_control_file_or_back_file(const char *pgdata_path, fio_location location, ControlFileData *control) +{ + char *buffer; + size_t size; + + /* First fetch file... 
*/ buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, true, location); + + if (!buffer || size == 0){ + /* Failed to read XLOG_CONTROL_FILE, or it is truncated: try the backup copy */ + buffer = slurpFile(pgdata_path, XLOG_CONTROL_BAK_FILE, &size, true, location); + if (!buffer) + elog(ERROR, "Could not read %s and %s files\n", XLOG_CONTROL_FILE, XLOG_CONTROL_BAK_FILE); /* Maybe it should be PANIC? */ + } + digestControlFile(control, buffer, size); + pg_free(buffer); +} + + /* * Get last check point record ptr from pg_tonrol. */ diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 193d1c680..f049aa1be 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -521,11 +521,17 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], optstring = longopts_to_optstring(longopts, cmd_len + len); + opterr = 0; /* Assign named options */ while ((c = getopt_long(argc, argv, optstring, longopts, &optindex)) != -1) { ConfigOption *opt; + if (c == '?') + { + elog(ERROR, "Option '%s' requires an argument. Try \"%s --help\" for more information.", + argv[optind-1], PROGRAM_NAME); + } opt = option_find(c, cmd_options); if (opt == NULL) opt = option_find(c, options); @@ -672,6 +678,8 @@ config_set_opt(ConfigOption options[], void *var, OptionSource source) /* * Return value of the function in the string representation. Result is * allocated string. + * We can set the GET_VAL_IN_BASE_UNITS flag in opt->flags + * before calling option_get_value() to get the option value in base units */ char * option_get_value(ConfigOption *opt) @@ -686,20 +694,33 @@ option_get_value(ConfigOption *opt) */ if (opt->flags & OPTION_UNIT) { - if (opt->type == 'i') - convert_from_base_unit(*((int32 *) opt->var), - opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'i') - convert_from_base_unit(*((int64 *) opt->var), - opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'u') - convert_from_base_unit_u(*((uint32 *) opt->var), - opt->flags & OPTION_UNIT, &value_u, &unit); - else if (opt->type == 'U') - convert_from_base_unit_u(*((uint64 *) opt->var), - opt->flags & OPTION_UNIT, &value_u, &unit); + if (opt->flags & GET_VAL_IN_BASE_UNITS){ + if (opt->type == 'i') + value = *((int32 *) opt->var); + else if (opt->type == 'I') + value = *((int64 *) opt->var); + else if (opt->type == 'u') + value_u = *((uint32 *) opt->var); + else if (opt->type == 'U') + value_u = *((uint64 *) opt->var); + unit = ""; + } + else + { + if (opt->type == 'i') + convert_from_base_unit(*((int32 *) opt->var), + opt->flags & OPTION_UNIT, &value, &unit); + else if (opt->type == 'I') + convert_from_base_unit(*((int64 *) opt->var), + opt->flags & OPTION_UNIT, &value, &unit); + else if (opt->type == 'u') + convert_from_base_unit_u(*((uint32 *) opt->var), + opt->flags & OPTION_UNIT, &value_u, &unit); + else if (opt->type == 'U') + convert_from_base_unit_u(*((uint64 *) opt->var), + opt->flags & OPTION_UNIT, &value_u, &unit); + } } - /* Get string representation itself */ switch (opt->type) { @@ -1177,7 +1198,8 @@ parse_time(const char *value, time_t *result, bool utc_default) char *local_tz = getenv("TZ"); /* tmp = replace( value, !isalnum, ' ' ) */ - tmp = pgut_malloc(strlen(value) + + 1); + tmp = pgut_malloc(strlen(value) + 1); + if(!tmp) return false; len = 0; fields_num = 1; @@ -1205,7 +1227,10 @@ parse_time(const char *value, time_t *result, bool utc_default) errno = 0; hr = strtol(value + 1, &cp, 10); if ((value + 1) == cp || errno == ERANGE) + { + pfree(tmp); return false; + } /* explicit 
delimiter? */ if (*cp == ':') @@ -1213,13 +1238,19 @@ parse_time(const char *value, time_t *result, bool utc_default) errno = 0; min = strtol(cp + 1, &cp, 10); if (errno == ERANGE) + { + pfree(tmp); return false; + } if (*cp == ':') { errno = 0; sec = strtol(cp + 1, &cp, 10); if (errno == ERANGE) + { + pfree(tmp); return false; + } } } /* otherwise, might have run things together... */ @@ -1234,11 +1265,20 @@ parse_time(const char *value, time_t *result, bool utc_default) /* Range-check the values; see notes in datatype/timestamp.h */ if (hr < 0 || hr > MAX_TZDISP_HOUR) + { + pfree(tmp); return false; + } if (min < 0 || min >= MINS_PER_HOUR) + { + pfree(tmp); return false; + } if (sec < 0 || sec >= SECS_PER_MINUTE) + { + pfree(tmp); return false; + } tz = (hr * MINS_PER_HOUR + min) * SECS_PER_MINUTE + sec; if (*value == '-') @@ -1251,7 +1291,10 @@ parse_time(const char *value, time_t *result, bool utc_default) } /* wrong format */ else if (!IsSpace(*value)) + { + pfree(tmp); return false; + } else value++; } @@ -1268,7 +1311,7 @@ parse_time(const char *value, time_t *result, bool utc_default) i = sscanf(tmp, "%04d %02d %02d %02d %02d %02d%1s", &tm.tm_year, &tm.tm_mon, &tm.tm_mday, &tm.tm_hour, &tm.tm_min, &tm.tm_sec, junk); - free(tmp); + pfree(tmp); if (i < 3 || i > 6) return false; @@ -1294,9 +1337,7 @@ parse_time(const char *value, time_t *result, bool utc_default) { /* set timezone to UTC */ pgut_setenv("TZ", "UTC"); -#ifdef WIN32 tzset(); -#endif } /* convert time to utc unix time */ @@ -1308,9 +1349,7 @@ parse_time(const char *value, time_t *result, bool utc_default) else pgut_unsetenv("TZ"); -#ifdef WIN32 tzset(); -#endif /* adjust time zone */ if (tz_set || utc_default) @@ -1421,16 +1460,16 @@ parse_lsn(const char *value, XLogRecPtr *result) len1 = strspn(value, "0123456789abcdefABCDEF"); if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || value[len1] != '/') - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); len2 = strspn(value + len1 + 1, "0123456789abcdefABCDEF"); if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || value[len1 + 1 + len2] != '\0') - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); if (sscanf(value, "%X/%X", &xlogid, &xrecoff) == 2) *result = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff; else { - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); return false; } @@ -1546,33 +1585,19 @@ time2iso(char *buf, size_t len, time_t time, bool utc) time_t gmt; time_t offset; char *ptr = buf; - char *local_tz = getenv("TZ"); /* set timezone to UTC if requested */ if (utc) { - pgut_setenv("TZ", "UTC"); -#ifdef WIN32 - tzset(); -#endif + ptm = gmtime(&time); + strftime(ptr, len, "%Y-%m-%d %H:%M:%S+00", ptm); + return; } ptm = gmtime(&time); gmt = mktime(ptm); ptm = localtime(&time); - if (utc) - { - /* return old timezone back if any */ - if (local_tz) - pgut_setenv("TZ", local_tz); - else - pgut_unsetenv("TZ"); -#ifdef WIN32 - tzset(); -#endif - } - /* adjust timezone offset */ offset = time - gmt + (ptm->tm_isdst ? 
3600 : 0); diff --git a/src/utils/configuration.h b/src/utils/configuration.h index 2c6ea3eec..59da29bd5 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -100,6 +100,7 @@ struct ConfigOption #define OPTION_UNIT_TIME 0xF0000 /* mask for time-related units */ #define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME) +#define GET_VAL_IN_BASE_UNITS 0x80000000 /* bitflag to get memory and time values in default units*/ extern ProbackupSubcmd parse_subcmd(char const * const subcmd_str); extern char const *get_subcmd_name(ProbackupSubcmd const subcmd); diff --git a/src/utils/file.c b/src/utils/file.c index c4ed9c721..fa08939f5 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -1159,24 +1159,35 @@ fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location lo bool fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location) { + char *abs_name1 = make_absolute_path(filename1); + char *abs_name2 = make_absolute_path(filename2); + bool result = strcmp(abs_name1, abs_name2) == 0; + #ifndef WIN32 - struct stat stat1, stat2; + if (!result) + { + struct stat stat1, stat2; - if (fio_stat(filename1, &stat1, follow_symlink, location) < 0) - elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno)); + if (fio_stat(filename1, &stat1, follow_symlink, location) < 0) + { + if (errno == ENOENT) + return false; + elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno)); + } - if (fio_stat(filename2, &stat2, follow_symlink, location) < 0) - elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno)); + if (fio_stat(filename2, &stat2, follow_symlink, location) < 0) + { + if (errno == ENOENT) + return false; + elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno)); + } - return stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev; -#else - char *abs_name1 = make_absolute_path(filename1); - char *abs_name2 = make_absolute_path(filename2); - bool result = strcmp(abs_name1, abs_name2) == 0; + result = (stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev); + } +#endif free(abs_name2); free(abs_name1); return result; -#endif } /* @@ -2526,11 +2537,22 @@ fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) exit_code = hdr.arg; goto cleanup; } - else if (hdr.cop == FIO_PAGE) + else if (hdr.cop == FIO_PAGE || hdr.cop == FIO_PAGE_ZERO) { int rc; - Assert(hdr.size <= CHUNK_SIZE); - IO_CHECK(fio_read_all(fio_stdin, in_buf, hdr.size), hdr.size); + unsigned size; + if (hdr.cop == FIO_PAGE) + { + Assert(hdr.size <= CHUNK_SIZE); + size = hdr.size; + IO_CHECK(fio_read_all(fio_stdin, in_buf, hdr.size), hdr.size); + } + else + { + Assert(hdr.arg <= CHUNK_SIZE); + size = hdr.arg; + memset(in_buf, 0, hdr.arg); + } /* We have received a chunk of compressed data, lets decompress it */ if (strm == NULL) @@ -2541,7 +2563,7 @@ fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) /* The fields next_in, avail_in initialized before init */ strm->next_in = (Bytef *)in_buf; - strm->avail_in = hdr.size; + strm->avail_in = size; rc = inflateInit2(strm, 15 + 16); @@ -2558,7 +2580,7 @@ fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) else { strm->next_in = (Bytef *)in_buf; - strm->avail_in = hdr.size; + strm->avail_in = size; } strm->next_out = (Bytef *)out_buf; /* output buffer */ @@ -2717,6 +2739,14 @@ fio_send_file_write(FILE* out, send_file_state* st, char *buf, size_t len) if (len == 0) return true; +#ifdef WIN32 + if 
(st->read_size > st->write_size && + _chsize_s(fileno(out), st->read_size) != 0) + { + elog(WARNING, "Could not change file size to %lld: %m", st->read_size); + return false; + } +#endif if (st->read_size > st->write_size && fseeko(out, st->read_size, SEEK_SET) != 0) { diff --git a/src/utils/parray.c b/src/utils/parray.c index 792e26907..65377c001 100644 --- a/src/utils/parray.c +++ b/src/utils/parray.c @@ -217,3 +217,30 @@ bool parray_contains(parray *array, void *elem) } return false; } + +/* effectively remove elements that satisfy certain criterion */ +void +parray_remove_if(parray *array, criterion_fn criterion, void *args, cleanup_fn clean) { + int i = 0; + int j = 0; + + /* removing certain elements */ + while(j < parray_num(array)) { + void *value = array->data[j]; + // if the value satisfies the criterion, clean it up + if(criterion(value, args)) { + clean(value); + j++; + continue; + } + + if(i != j) + array->data[i] = array->data[j]; + + i++; + j++; + } + + /* adjust the number of used elements */ + array->used -= j - i; +} diff --git a/src/utils/parray.h b/src/utils/parray.h index e92ad728c..08846f252 100644 --- a/src/utils/parray.h +++ b/src/utils/parray.h @@ -16,6 +16,9 @@ */ typedef struct parray parray; +typedef bool (*criterion_fn)(void *value, void *args); +typedef void (*cleanup_fn)(void *ref); + extern parray *parray_new(void); extern void parray_expand(parray *array, size_t newnum); extern void parray_free(parray *array); @@ -32,6 +35,7 @@ extern void *parray_bsearch(parray *array, const void *key, int(*compare)(const extern int parray_bsearch_index(parray *array, const void *key, int(*compare)(const void *, const void *)); extern void parray_walk(parray *array, void (*action)(void *)); extern bool parray_contains(parray *array, void *elem); +extern void parray_remove_if(parray *array, criterion_fn criterion, void *args, cleanup_fn clean); #endif /* PARRAY_H */ diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 6123c18d8..9559fa644 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -1215,13 +1215,16 @@ pgut_pgfnames(const char *path, bool strict) } } + filenames[numnames] = NULL; + if (errno) { elog(strict ? 
ERROR : WARNING, "could not read directory \"%s\": %m", path); + pgut_pgfnames_cleanup(filenames); + closedir(dir); return NULL; } - filenames[numnames] = NULL; if (closedir(dir)) { diff --git a/src/utils/pgut.h b/src/utils/pgut.h index f8554f9d0..1b7b7864c 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -115,4 +115,14 @@ extern int usleep(unsigned int usec); #define ARG_SIZE_HINT static #endif +static inline uint32 hash_mix32_2(uint32 a, uint32 b) +{ + b ^= (a<<7)|(a>>25); + a *= 0xdeadbeef; + b *= 0xcafeabed; + a ^= a >> 16; + b ^= b >> 15; + return a^b; +} + #endif /* PGUT_H */ diff --git a/src/validate.c b/src/validate.c index 9372b082c..0887b2e7a 100644 --- a/src/validate.c +++ b/src/validate.c @@ -394,15 +394,13 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) /* open directory and list contents */ dir = opendir(catalogState->backup_subdir_path); if (dir == NULL) - elog(ERROR, "cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno)); + elog(ERROR, "Cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno)); errno = 0; while ((dent = readdir(dir))) { char child[MAXPGPATH]; struct stat st; - InstanceState *instanceState; - /* skip entries point current dir or parent dir */ if (strcmp(dent->d_name, ".") == 0 || @@ -412,7 +410,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) join_path_components(child, catalogState->backup_subdir_path, dent->d_name); if (lstat(child, &st) == -1) - elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno)); + elog(ERROR, "Cannot stat file \"%s\": %s", child, strerror(errno)); if (!S_ISDIR(st.st_mode)) continue; @@ -420,7 +418,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) /* * Initialize instance configuration. 
*/ - instanceState = pgut_new(InstanceState); + instanceState = pgut_new(InstanceState); /* memory leak */ strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, diff --git a/tests/archive_test.py b/tests/archive_test.py index b2217a7bf..00fd1f592 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -3,6 +3,7 @@ import gzip import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException +from .helpers.data_helpers import tail_file from datetime import datetime, timedelta import subprocess from sys import exit @@ -383,26 +384,31 @@ def test_archive_push_file_exists(self): self.switch_wal_segment(node) sleep(1) - with open(log_file, 'r') as f: - log_content = f.read() + log = tail_file(log_file, linetimeout=30, totaltimeout=120, + collect=True) + log.wait(contains = 'The failed archive command was') + self.assertIn( 'LOG: archive command failed with exit code 1', - log_content) + log.content) self.assertIn( 'DETAIL: The failed archive command was:', - log_content) + log.content) self.assertIn( 'pg_probackup archive-push WAL file', - log_content) + log.content) self.assertIn( 'WAL file already exists in archive with different checksum', - log_content) + log.content) self.assertNotIn( - 'pg_probackup archive-push completed successfully', log_content) + 'pg_probackup archive-push completed successfully', log.content) + + # btw check that console coloring codes are not slipped into log file + self.assertNotIn('[0m', log.content) if self.get_version(node) < 100000: wal_src = os.path.join( @@ -419,19 +425,9 @@ def test_archive_push_file_exists(self): shutil.copyfile(wal_src, file) self.switch_wal_segment(node) - sleep(5) - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'pg_probackup archive-push completed successfully', - log_content) - - # btw check that console coloring codes are not slipped into log file - self.assertNotIn('[0m', log_content) - - print(log_content) + log.stop_collect() + log.wait(contains = 'pg_probackup archive-push completed successfully') # @unittest.skip("skip") def test_archive_push_file_exists_overwrite(self): @@ -471,39 +467,35 @@ def test_archive_push_file_exists_overwrite(self): self.switch_wal_segment(node) sleep(1) - with open(log_file, 'r') as f: - log_content = f.read() + log = tail_file(log_file, linetimeout=30, collect=True) + log.wait(contains = 'The failed archive command was') self.assertIn( - 'LOG: archive command failed with exit code 1', log_content) + 'LOG: archive command failed with exit code 1', log.content) self.assertIn( - 'DETAIL: The failed archive command was:', log_content) + 'DETAIL: The failed archive command was:', log.content) self.assertIn( - 'pg_probackup archive-push WAL file', log_content) + 'pg_probackup archive-push WAL file', log.content) self.assertNotIn( 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) + 'different checksum, overwriting', log.content) self.assertIn( 'WAL file already exists in archive with ' - 'different checksum', log_content) + 'different checksum', log.content) self.assertNotIn( - 'pg_probackup archive-push completed successfully', log_content) + 'pg_probackup archive-push completed successfully', log.content) self.set_archiving(backup_dir, 'node', node, overwrite=True) node.reload() self.switch_wal_segment(node) - sleep(5) - with open(log_file, 'r') as f: - log_content = f.read() - self.assertTrue( - 'pg_probackup 
archive-push completed successfully' in log_content, - 'Expecting messages about successfull execution archive_command') + log.drop_content() + log.wait(contains = 'pg_probackup archive-push completed successfully') self.assertIn( 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) + 'different checksum, overwriting', log.content) # @unittest.skip("skip") def test_archive_push_partial_file_exists(self): @@ -2021,7 +2013,7 @@ def test_archive_push_sanity(self): self.backup_node(backup_dir, 'node', node) with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() + postgres_log_content = cleanup_ptrack(f.read()) # print(postgres_log_content) # make sure that .backup file is not compressed @@ -2049,14 +2041,22 @@ def test_archive_push_sanity(self): replica.promote() replica.pgbench_init(scale=10) - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - replica_log_content = f.read() + log = tail_file(os.path.join(replica.logs_dir, 'postgresql.log'), + collect=True) + log.wait(regex=r"pushing file.*history") + log.wait(contains='archive-push completed successfully') + log.wait(regex=r"pushing file.*partial") + log.wait(contains='archive-push completed successfully') # make sure that .partial file is not compressed - self.assertNotIn('.partial.gz', replica_log_content) + self.assertNotIn('.partial.gz', log.content) # make sure that .history file is not compressed - self.assertNotIn('.history.gz', replica_log_content) - self.assertNotIn('WARNING', replica_log_content) + self.assertNotIn('.history.gz', log.content) + + replica.stop() + log.wait_shutdown() + + self.assertNotIn('WARNING', cleanup_ptrack(log.content)) output = self.show_archive( backup_dir, 'node', as_json=False, as_text=True, @@ -2440,18 +2440,11 @@ def test_archive_get_prefetch_corruption(self): os.remove(os.path.join(replica.logs_dir, 'postgresql.log')) replica.slow_start(replica=True) - sleep(60) - - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - self.assertIn( - 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename), - postgres_log_content) - - self.assertIn( - 'LOG: restored log file "{0}" from archive'.format(filename), - postgres_log_content) + prefetch_line = 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename) + restored_line = 'LOG: restored log file "{0}" from archive'.format(filename) + tailer = tail_file(os.path.join(replica.logs_dir, 'postgresql.log')) + tailer.wait(contains=prefetch_line) + tailer.wait(contains=restored_line) # @unittest.skip("skip") def test_archive_show_partial_files_handling(self): @@ -2669,6 +2662,17 @@ def test_archive_empty_history_file(self): 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), log_content) + +def cleanup_ptrack(log_content): + # PBCKP-423 - need to clean ptrack warning + ptrack_is_not = 'Ptrack 1.X is not supported anymore' + if ptrack_is_not in log_content: + lines = [line for line in log_content.splitlines() + if ptrack_is_not not in line] + log_content = "".join(lines) + return log_content + + # TODO test with multiple not archived segments. # TODO corrupted file in archive. 
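Note: the archive tests above now wait on the server log with the `tail_file` helper (added below in `tests/helpers/data_helpers.py`) instead of a fixed `sleep()` followed by a full re-read of the file. A minimal usage sketch, assuming the helper's API as added in this diff; the log path and import form are illustrative only:

```python
# Sketch of the new log-waiting pattern used by the updated archive tests.
from tests.helpers.data_helpers import tail_file

log = tail_file('/tmp/node/logs/postgresql.log',
                linetimeout=30,    # give up if no new line appears for 30s
                totaltimeout=120,  # overall deadline for a single wait()
                collect=True)      # keep lines so .content can be asserted on

log.wait(contains='archive-push completed successfully')
assert '[0m' not in log.content   # console color codes must not leak into the log
log.drop_content()                # forget collected lines before the next wait()
log.wait(regex=r'pushing file.*history')
```

On timeout, `wait()` raises `unittest.TestCase.failureException`, so a missing log line fails the test promptly instead of leaving it to a racy `sleep(5)`.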
diff --git a/tests/auth_test.py b/tests/auth_test.py index 52d7e1544..32cabc4a1 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -117,13 +117,13 @@ def test_backup_via_unprivileged_user(self): except ProbackupException as e: if self.get_version(node) < 150000: self.assertIn( - "ERROR: query failed: ERROR: permission denied " + "ERROR: Query failed: ERROR: permission denied " "for function pg_stop_backup", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) else: self.assertIn( - "ERROR: query failed: ERROR: permission denied " + "ERROR: Query failed: ERROR: permission denied " "for function pg_backup_stop", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) diff --git a/tests/backup_test.py b/tests/backup_test.py index fc1135cab..dc60228b5 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -2926,9 +2926,9 @@ def test_missing_wal_segment(self): gdb.output) self.assertIn( - 'WARNING: backup in progress, stop backup', + 'WARNING: A backup is in progress, stopping it', gdb.output) - + # TODO: check the same for PAGE backup # @unittest.skip("skip") @@ -3075,11 +3075,20 @@ def test_missing_replication_permission(self): except ProbackupException as e: # 9.5: ERROR: must be superuser or replication role to run a backup # >=9.6: FATAL: must be superuser or replication role to start walsender - self.assertRegex( - e.message, - "ERROR: must be superuser or replication role to run a backup|FATAL: must be superuser or replication role to start walsender", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) + if self.pg_config_version < 160000: + self.assertRegex( + e.message, + "ERROR: must be superuser or replication role to run a backup|" + "FATAL: must be superuser or replication role to start walsender", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + else: + self.assertRegex( + e.message, + "FATAL: permission denied to start WAL sender\n" + "DETAIL: Only roles with the REPLICATION", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) # @unittest.skip("skip") def test_missing_replication_permission_1(self): @@ -3228,9 +3237,17 @@ def test_missing_replication_permission_1(self): # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender' # OS-dependant messages: # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (::1), port 12101 failed: Connection refused\n\tIs the server running on that host and accepting TCP/IP connections?\nconnection to server at "localhost" (127.0.0.1), port 12101 failed: FATAL: must be superuser or replication role to start walsender' - self.assertRegex( - output, - r'WARNING: could not connect to database backupdb:[\s\S]*?FATAL: must be superuser or replication role to start walsender') + + if self.pg_config_version < 160000: + self.assertRegex( + output, + r'WARNING: could not connect to database backupdb:[\s\S]*?' + r'FATAL: must be superuser or replication role to start walsender') + else: + self.assertRegex( + output, + r'WARNING: could not connect to database backupdb:[\s\S]*?' 
+ r'FATAL: permission denied to start WAL sender') # @unittest.skip("skip") def test_basic_backup_default_transaction_read_only(self): @@ -3316,7 +3333,7 @@ def test_backup_atexit(self): log_content = f.read() #print(log_content) self.assertIn( - 'WARNING: backup in progress, stop backup', + 'WARNING: A backup is in progress, stopping it.', log_content) if self.get_version(node) < 150000: @@ -3327,7 +3344,7 @@ def test_backup_atexit(self): self.assertIn( 'FROM pg_catalog.pg_backup_stop', log_content) - + self.assertIn( 'setting its status to ERROR', log_content) @@ -3573,3 +3590,69 @@ def test_start_time_few_nodes(self): show_backup2 = self.show_pb(backup_dir2, 'node2')[3] self.assertEqual(show_backup1['id'], show_backup2['id']) + def test_regress_issue_585(self): + """https://github.com/postgrespro/pg_probackup/issues/585""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # create couple of files that looks like db files + with open(os.path.join(node.data_dir, 'pg_multixact/offsets/1000'),'wb') as f: + pass + with open(os.path.join(node.data_dir, 'pg_multixact/members/1000'),'wb') as f: + pass + + self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream']) + + output = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream'], + return_id=False, + ) + self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like') + + node.cleanup() + + output = self.restore_node(backup_dir, 'node', node) + self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like') + + def test_2_delta_backups(self): + """https://github.com/postgrespro/pg_probackup/issues/596""" + node = self.make_simple_node('node', + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + # self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + full_backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + # delta backup mode + delta_backup_id1 = self.backup_node( + backup_dir, 'node', node, backup_type="delta", options=["--stream"]) + + delta_backup_id2 = self.backup_node( + backup_dir, 'node', node, backup_type="delta", options=["--stream"]) + + # postgresql.conf and pg_hba.conf shouldn't be copied + conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id1, 'database', 'postgresql.conf') + self.assertFalse( + os.path.exists(conf_file), + "File should not exist: {0}".format(conf_file)) + conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id2, 'database', 'postgresql.conf') + print(conf_file) + self.assertFalse( + os.path.exists(conf_file), + "File should not exist: {0}".format(conf_file)) diff --git a/tests/catchup_test.py b/tests/catchup_test.py index 21bcd7973..cf8388dd2 100644 --- a/tests/catchup_test.py +++ b/tests/catchup_test.py @@ -1585,3 +1585,42 @@ def test_dry_run_catchup_delta(self): # Cleanup src_pg.stop() + + def test_pgdata_is_ignored(self): + """ In catchup we still allow PGDATA to be set either from command line + or from the env var. 
This tests that PGDATA is actually ignored and
+        --source-pgdata is used instead
+        """
+        node = self.make_simple_node('node',
+            set_replication = True
+            )
+        node.slow_start()
+
+        # do full catchup
+        dest = self.make_empty_node('dst')
+        self.catchup_node(
+            backup_mode = 'FULL',
+            source_pgdata = node.data_dir,
+            destination_node = dest,
+            options = ['-d', 'postgres', '-p', str(node.port), '--stream', '--pgdata=xxx']
+            )
+
+        self.compare_pgdata(
+            self.pgdata_content(node.data_dir),
+            self.pgdata_content(dest.data_dir)
+            )
+
+        os.environ['PGDATA'] = 'xxx'
+
+        dest2 = self.make_empty_node('dst')
+        self.catchup_node(
+            backup_mode = 'FULL',
+            source_pgdata = node.data_dir,
+            destination_node = dest2,
+            options = ['-d', 'postgres', '-p', str(node.port), '--stream']
+            )
+
+        self.compare_pgdata(
+            self.pgdata_content(node.data_dir),
+            self.pgdata_content(dest2.data_dir)
+            )
diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py
index cd2826d21..fb4a6c6b8 100644
--- a/tests/cfs_backup_test.py
+++ b/tests/cfs_backup_test.py
@@ -431,16 +431,10 @@ def test_page_doesnt_store_unchanged_cfm(self):
             "FROM generate_series(0,256) i".format('t1', tblspace_name)
         )
 
-        try:
-            backup_id_full = self.backup_node(
-                self.backup_dir, 'node', self.node, backup_type='full')
-        except ProbackupException as e:
-            self.fail(
-                "ERROR: Full backup failed.\n {0} \n {1}".format(
-                    repr(self.cmd),
-                    repr(e.message)
-                )
-            )
+        self.node.safe_psql("postgres", "checkpoint")
+
+        backup_id_full = self.backup_node(
+            self.backup_dir, 'node', self.node, backup_type='full')
 
         self.assertTrue(
             find_by_extensions(
@@ -449,16 +443,8 @@ def test_page_doesnt_store_unchanged_cfm(self):
             "ERROR: .cfm files not found in backup dir"
         )
 
-        try:
-            backup_id = self.backup_node(
-                self.backup_dir, 'node', self.node, backup_type='page')
-        except ProbackupException as e:
-            self.fail(
-                "ERROR: Incremental backup failed.\n {0} \n {1}".format(
-                    repr(self.cmd),
-                    repr(e.message)
-                )
-            )
+        backup_id = self.backup_node(
+            self.backup_dir, 'node', self.node, backup_type='page')
 
         show_backup = self.show_pb(self.backup_dir, 'node', backup_id)
         self.assertEqual(
@@ -1046,7 +1032,6 @@ def test_fullbackup_after_create_table_page_after_create_table_stream(self):
         )
 
     # --- Make backup with not valid data(broken .cfm) --- #
-    @unittest.expectedFailure
     # @unittest.skip("skip")
     @unittest.skipUnless(ProbackupTest.enterprise, 'skip')
     def test_delete_random_cfm_file_from_tablespace_dir(self):
diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py
index 1e6daefdb..eb46aea19 100644
--- a/tests/checkdb_test.py
+++ b/tests/checkdb_test.py
@@ -131,7 +131,7 @@ def test_checkdb_amcheck_only_sanity(self):
                     repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                "ERROR: required parameter not specified: --instance",
+                "ERROR: Required parameter not specified: --instance",
                 e.message,
                 "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                     repr(e.message), self.cmd))
@@ -397,7 +397,7 @@ def test_checkdb_block_validation_sanity(self):
                     repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                "ERROR: required parameter not specified: PGDATA (-D, --pgdata)",
+                "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)",
                 e.message,
                 "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                     repr(e.message), self.cmd))
@@ -808,7 +808,7 @@ def test_checkdb_with_least_privileges(self):
                 "backupdb",
                 "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup")
 
-        if ProbackupTest.enterprise:
+        if ProbackupTest.pgpro:
             node.safe_psql(
'backupdb', 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; ' diff --git a/tests/compatibility_test.py b/tests/compatibility_test.py index 591afb069..7ae8baf9f 100644 --- a/tests/compatibility_test.py +++ b/tests/compatibility_test.py @@ -14,12 +14,7 @@ def check_ssh_agent_path_exists(): return 'PGPROBACKUP_SSH_AGENT_PATH' in os.environ -class CompatibilityTest(ProbackupTest, unittest.TestCase): - - def setUp(self): - self.fname = self.id().split('.')[3] - - # @unittest.expectedFailure +class CrossCompatibilityTest(ProbackupTest, unittest.TestCase): @unittest.skipUnless(check_manual_tests_enabled(), 'skip manual test') @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip no ssh agent path exist') # @unittest.skip("skip") @@ -86,6 +81,14 @@ def test_catchup_with_different_remote_major_pg(self): options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path] ) + +class CompatibilityTest(ProbackupTest, unittest.TestCase): + + def setUp(self): + super().setUp() + if not self.probackup_old_path: + self.skipTest('PGPROBACKUPBIN_OLD is not set') + # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_page(self): diff --git a/tests/compression_test.py b/tests/compression_test.py index e779f6472..55924b9d2 100644 --- a/tests/compression_test.py +++ b/tests/compression_test.py @@ -443,7 +443,7 @@ def test_compression_wrong_algorithm(self): except ProbackupException as e: self.assertEqual( e.message, - 'ERROR: invalid compress algorithm value "bla-blah"\n', + 'ERROR: Invalid compress algorithm value "bla-blah"\n', '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index 5948d0503..f0c77ae16 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -5,9 +5,9 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. pg_probackup version - pg_probackup init -B backup-path + pg_probackup init -B backup-dir - pg_probackup set-config -B backup-path --instance=instance_name + pg_probackup set-config -B backup-dir --instance=instance-name [-D pgdata-path] [--external-dirs=external-directories-paths] [--log-level-console=log-level-console] @@ -32,16 +32,17 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--archive-port=port] [--archive-user=username] [--help] - pg_probackup set-backup -B backup-path --instance=instance_name + pg_probackup set-backup -B backup-dir --instance=instance-name -i backup-id [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup show-config -B backup-path --instance=instance_name + pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] + [--no-scale-units] [--help] - pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name [-D pgdata-path] [-C] [--stream [-S slot-name] [--temp-slot]] [--backup-pg-log] [-j num-threads] [--progress] @@ -73,7 +74,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup restore -B backup-path --instance=instance_name + pg_probackup restore -B backup-dir --instance=instance-name [-D pgdata-path] [-i backup-id] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -92,6 +93,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] + [--destroy-all-other-dbs] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] @@ -99,7 +101,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--archive-port=port] [--archive-user=username] [--help] - pg_probackup validate -B backup-path [--instance=instance_name] + pg_probackup validate -B backup-dir [--instance=instance-name] [-i backup-id] [--progress] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -108,18 +110,18 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--skip-block-validation] [--help] - pg_probackup checkdb [-B backup-path] [--instance=instance_name] + pg_probackup checkdb [-B backup-dir] [--instance=instance-name] [-D pgdata-path] [--progress] [-j num-threads] [--amcheck] [--skip-block-validation] [--heapallindexed] [--checkunique] [--help] - pg_probackup show -B backup-path - [--instance=instance_name [-i backup-id]] + pg_probackup show -B backup-dir + [--instance=instance-name [-i backup-id]] [--format=format] [--archive] [--no-color] [--help] - pg_probackup delete -B backup-path --instance=instance_name + pg_probackup delete -B backup-dir --instance=instance-name [-j num-threads] [--progress] [--retention-redundancy=retention-redundancy] [--retention-window=retention-window] @@ -129,24 +131,24 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--dry-run] [--no-validate] [--no-sync] [--help] - pg_probackup merge -B backup-path --instance=instance_name + pg_probackup merge -B backup-dir --instance=instance-name -i backup-id [--progress] [-j num-threads] [--no-validate] [--no-sync] [--help] - pg_probackup add-instance -B backup-path -D pgdata-path - --instance=instance_name + pg_probackup add-instance -B backup-dir -D pgdata-path + --instance=instance-name [--external-dirs=external-directories-paths] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--help] - pg_probackup del-instance -B backup-path - --instance=instance_name + pg_probackup del-instance -B backup-dir + --instance=instance-name [--help] - pg_probackup archive-push -B backup-path --instance=instance_name + pg_probackup archive-push -B backup-dir --instance=instance-name --wal-file-name=wal-file-name [--wal-file-path=wal-file-path] [-j num-threads] [--batch-size=batch_size] @@ -160,7 +162,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--ssh-options] [--help] - pg_probackup archive-get -B backup-path --instance=instance_name + pg_probackup archive-get -B backup-dir --instance=instance-name --wal-file-path=wal-file-path --wal-file-name=wal-file-name [-j num-threads] [--batch-size=batch_size] diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out index 358c49428..bd6d76970 100644 --- a/tests/expected/option_help_ru.out +++ b/tests/expected/option_help_ru.out @@ -5,9 +5,9 @@ pg_probackup - утилита для управления резервным к pg_probackup version - pg_probackup init -B backup-path + pg_probackup init -B backup-dir - pg_probackup set-config -B backup-path --instance=instance_name + pg_probackup set-config -B backup-dir --instance=instance-name [-D pgdata-path] [--external-dirs=external-directories-paths] [--log-level-console=log-level-console] @@ -32,16 +32,17 @@ pg_probackup - утилита для управления резервным к [--archive-port=port] [--archive-user=username] [--help] - pg_probackup set-backup -B backup-path --instance=instance_name + pg_probackup set-backup -B backup-dir --instance=instance-name -i backup-id [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup show-config -B backup-path --instance=instance_name + pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] + [--no-scale-units] [--help] - pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name [-D pgdata-path] [-C] [--stream [-S slot-name] [--temp-slot]] [--backup-pg-log] [-j num-threads] [--progress] @@ -73,7 +74,7 @@ pg_probackup - утилита для управления резервным к [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup restore -B backup-path --instance=instance_name + pg_probackup restore -B backup-dir --instance=instance-name [-D pgdata-path] [-i backup-id] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -92,6 +93,7 @@ pg_probackup - утилита для управления резервным к [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] + [--destroy-all-other-dbs] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] @@ -99,7 +101,7 @@ pg_probackup - утилита для управления резервным к [--archive-port=port] [--archive-user=username] [--help] - pg_probackup validate -B backup-path [--instance=instance_name] + pg_probackup validate -B backup-dir [--instance=instance-name] [-i backup-id] [--progress] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -108,18 +110,18 @@ pg_probackup - утилита для управления резервным к [--skip-block-validation] [--help] - pg_probackup checkdb [-B backup-path] [--instance=instance_name] + pg_probackup checkdb [-B backup-dir] [--instance=instance-name] [-D pgdata-path] [--progress] [-j num-threads] [--amcheck] [--skip-block-validation] [--heapallindexed] [--checkunique] [--help] - pg_probackup show -B backup-path - [--instance=instance_name [-i backup-id]] + pg_probackup show -B backup-dir + [--instance=instance-name [-i backup-id]] [--format=format] [--archive] [--no-color] [--help] - pg_probackup delete -B backup-path --instance=instance_name + pg_probackup delete -B backup-dir --instance=instance-name [-j num-threads] [--progress] [--retention-redundancy=retention-redundancy] 
[--retention-window=retention-window] @@ -129,24 +131,24 @@ pg_probackup - утилита для управления резервным к [--dry-run] [--no-validate] [--no-sync] [--help] - pg_probackup merge -B backup-path --instance=instance_name + pg_probackup merge -B backup-dir --instance=instance-name -i backup-id [--progress] [-j num-threads] [--no-validate] [--no-sync] [--help] - pg_probackup add-instance -B backup-path -D pgdata-path - --instance=instance_name + pg_probackup add-instance -B backup-dir -D pgdata-path + --instance=instance-name [--external-dirs=external-directories-paths] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--help] - pg_probackup del-instance -B backup-path - --instance=instance_name + pg_probackup del-instance -B backup-dir + --instance=instance-name [--help] - pg_probackup archive-push -B backup-path --instance=instance_name + pg_probackup archive-push -B backup-dir --instance=instance-name --wal-file-name=wal-file-name [--wal-file-path=wal-file-path] [-j num-threads] [--batch-size=batch_size] @@ -160,7 +162,7 @@ pg_probackup - утилита для управления резервным к [--ssh-options] [--help] - pg_probackup archive-get -B backup-path --instance=instance_name + pg_probackup archive-get -B backup-dir --instance=instance-name --wal-file-path=wal-file-path --wal-file-name=wal-file-name [-j num-threads] [--batch-size=batch_size] diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out deleted file mode 100644 index e0d6924b9..000000000 --- a/tests/expected/option_version.out +++ /dev/null @@ -1 +0,0 @@ -pg_probackup 2.5.11 diff --git a/tests/false_positive_test.py b/tests/false_positive_test.py index fbb785c60..ea82cb18f 100644 --- a/tests/false_positive_test.py +++ b/tests/false_positive_test.py @@ -203,13 +203,16 @@ def test_recovery_target_time_backup_victim(self): backup_dir, 'node', options=['--recovery-target-time={0}'.format(target_time)]) - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") def test_recovery_target_lsn_backup_victim(self): """ Check that for validation to recovery target probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 + + @y.sokolov: looks like this test should pass. 
+        So I commented out 'expectedFailure'
+        """
         backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         node = self.make_simple_node(
diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py
index 4ae3ef8c4..2e5ed40e8 100644
--- a/tests/helpers/__init__.py
+++ b/tests/helpers/__init__.py
@@ -1,4 +1,4 @@
-__all__ = ['ptrack_helpers', 'cfs_helpers', 'expected_errors']
+__all__ = ['ptrack_helpers', 'cfs_helpers', 'data_helpers']
 
 import unittest
diff --git a/tests/helpers/data_helpers.py b/tests/helpers/data_helpers.py
new file mode 100644
index 000000000..27cb66c3d
--- /dev/null
+++ b/tests/helpers/data_helpers.py
@@ -0,0 +1,78 @@
+import re
+import unittest
+import functools
+import time
+
+def _tail_file(file, linetimeout, totaltimeout):
+    start = time.time()
+    with open(file, 'r') as f:
+        waits = 0
+        while waits < linetimeout:
+            line = f.readline()
+            if line == '':
+                waits += 1
+                time.sleep(1)
+                continue
+            waits = 0
+            yield line
+            if time.time() - start > totaltimeout:
+                raise TimeoutError("total timeout tailing %s" % (file,))
+        else:
+            raise TimeoutError("line timeout tailing %s" % (file,))
+
+
+class tail_file(object): # snake case to imitate function
+    def __init__(self, filename, *, linetimeout=10, totaltimeout=60, collect=False):
+        self.filename = filename
+        self.tailer = _tail_file(filename, linetimeout, totaltimeout)
+        self.collect = collect
+        self.lines = []
+        self._content = None
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        line = next(self.tailer)
+        if self.collect:
+            self.lines.append(line)
+            self._content = None
+        return line
+
+    @property
+    def content(self):
+        if not self.collect:
+            raise AttributeError("content collection is not enabled")
+        if not self._content:
+            self._content = "".join(self.lines)
+        return self._content
+
+    def drop_content(self):
+        self.lines.clear()
+        self._content = None
+
+    def stop_collect(self):
+        self.drop_content()
+        self.collect = False
+
+    def wait(self, *, contains: str = None, regex: str = None):
+        assert contains is not None or regex is not None
+        assert contains is None or regex is None
+        try:
+            for line in self:
+                if contains is not None and contains in line:
+                    break
+                if regex is not None and re.search(regex, line):
+                    break
+        except TimeoutError:
+            msg = "Didn't find expected "
+            if contains is not None:
+                msg += repr(contains)
+            elif regex is not None:
+                msg += f"/{regex}/"
+            msg += f" in {self.filename}"
+            raise unittest.TestCase.failureException(msg)
+
+    def wait_shutdown(self):
+        self.wait(contains='database system is shut down')
diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py
index 067225d66..27d982856 100644
--- a/tests/helpers/ptrack_helpers.py
+++ b/tests/helpers/ptrack_helpers.py
@@ -90,27 +90,34 @@ def dir_files(base_dir):
     return out_list
 
 
+def is_pgpro():
+    # pg_config --help
+    cmd = [os.environ['PG_CONFIG'], '--help']
+
+    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
+    return b'postgrespro' in result.stdout
+
+
 def is_enterprise():
     # pg_config --help
     cmd = [os.environ['PG_CONFIG'], '--help']
 
-    p = subprocess.Popen(
-        cmd,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE
-    )
-    return b'postgrespro.ru' in p.communicate()[0]
+    p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
+    # PostgresPro std or ent
+    if b'postgrespro' in p.stdout:
+        cmd = [os.environ['PG_CONFIG'], '--pgpro-edition']
+        p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
check=True) + + return b'enterprise' in p.stdout + else: # PostgreSQL + return False + - def is_nls_enabled(): cmd = [os.environ['PG_CONFIG'], '--configure'] - p = subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - return b'enable-nls' in p.communicate()[0] + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + return b'enable-nls' in result.stdout def base36enc(number): @@ -229,6 +236,7 @@ class ProbackupTest(object): # Class attributes enterprise = is_enterprise() enable_nls = is_nls_enabled() + pgpro = is_pgpro() def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) @@ -415,8 +423,12 @@ def is_test_result_ok(test_case): result = test_case.defaultTestResult() # These two methods have no side effects test_case._feedErrorsToResult(result, test_case._outcome.errors) else: - # Python 3.11+ + # Python 3.11+ and pytest 5.3.5+ result = test_case._outcome.result + if not hasattr(result, 'errors'): + result.errors = [] + if not hasattr(result, 'failures'): + result.failures = [] else: # Python 2.7, 3.0-3.3 result = getattr(test_case, '_outcomeForDoCleanups', test_case._resultForDoCleanups) @@ -974,7 +986,8 @@ def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, retur else: return self.output except subprocess.CalledProcessError as e: - raise ProbackupException(e.output.decode('utf-8'), self.cmd) + raise ProbackupException(e.output.decode('utf-8').replace("\r",""), + self.cmd) def run_binary(self, command, asynchronous=False, env=None): @@ -1774,7 +1787,7 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): 'ptrack_control', 'ptrack_init', 'pg_control', 'probackup_recovery.conf', 'recovery.signal', 'standby.signal', 'ptrack.map', 'ptrack.map.mmap', - 'ptrack.map.tmp' + 'ptrack.map.tmp', 'recovery.done','backup_label.old' ] if exclude_dirs: @@ -1875,7 +1888,7 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict # Compare directories restored_dirs = set(restored_pgdata['dirs']) - original_dirs = set(restored_pgdata['dirs']) + original_dirs = set(original_pgdata['dirs']) for directory in sorted(restored_dirs - original_dirs): fail = True @@ -1903,7 +1916,7 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict restored.mode) restored_files = set(restored_pgdata['files']) - original_files = set(restored_pgdata['files']) + original_files = set(original_pgdata['files']) for file in sorted(restored_files - original_files): # File is present in RESTORED PGDATA @@ -2243,4 +2256,4 @@ def __init__(self, is_datafile: bool): self.is_datafile = is_datafile class ContentDir(object): - __slots__ = ('mode') \ No newline at end of file + __slots__ = ('mode') diff --git a/tests/incr_restore_test.py b/tests/incr_restore_test.py index 613e4dd36..6a2164098 100644 --- a/tests/incr_restore_test.py +++ b/tests/incr_restore_test.py @@ -9,8 +9,9 @@ import hashlib import shutil import json -from testgres import QueryException - +from testgres import QueryException, StartNodeException +import stat +from stat import S_ISDIR class IncrRestoreTest(ProbackupTest, unittest.TestCase): @@ -1962,7 +1963,9 @@ def test_incremental_partial_restore_exclude_checksum(self): node2, options=[ "--db-exclude=db1", "--db-exclude=db5", - "-I", "checksum"]) + "-I", "checksum", + "--destroy-all-other-dbs", + ]) pgdata2 = self.pgdata_content(node2.data_dir) @@ -2068,7 +2071,9 @@ def test_incremental_partial_restore_exclude_lsn(self): node2, 
options=[
                 "--db-exclude=db1",
                 "--db-exclude=db5",
-                "-I", "lsn"])
+                "-I", "lsn",
+                "--destroy-all-other-dbs",
+            ])
 
         pgdata2 = self.pgdata_content(node2.data_dir)
 
@@ -2188,7 +2193,8 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self):
                     "--db-exclude=db1",
                     "--db-exclude=db5",
                     "-T", "{0}={1}".format(
-                        node_tablespace, node2_tablespace)])
+                        node_tablespace, node2_tablespace),
+                    "--destroy-all-other-dbs"])
             # we should die here because exception is what we expect to happen
             self.assertEqual(
                 1, 0,
@@ -2209,7 +2215,9 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self):
                 "--db-exclude=db1",
                 "--db-exclude=db5",
                 "-T", "{0}={1}".format(
-                    node_tablespace, node2_tablespace)])
+                    node_tablespace, node2_tablespace),
+                "--destroy-all-other-dbs",
+            ])
 
         pgdata2 = self.pgdata_content(node2.data_dir)
 
@@ -2241,6 +2249,127 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self):
 
         self.assertNotIn('PANIC', output)
 
+    def test_incremental_partial_restore_deny(self):
+        """
+        Do not allow partial incremental restore into non-empty PGDATA
+        because we can't limit WAL replay to a single database.
+        """
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        for i in range(1, 3):
+            node.safe_psql('postgres', f'CREATE database db{i}')
+
+        # FULL backup
+        backup_id = self.backup_node(backup_dir, 'node', node)
+        pgdata = self.pgdata_content(node.data_dir)
+
+        try:
+            self.restore_node(backup_dir, 'node', node, options=["--db-include=db1", '-I', 'LSN'])
+            self.fail("incremental partial restore is not allowed")
+        except ProbackupException as e:
+            self.assertIn("Incremental restore is not allowed: Postmaster is running.", e.message)
+
+        node.safe_psql('db2', 'create table x (id int)')
+        node.safe_psql('db2', 'insert into x values (42)')
+
+        node.stop()
+
+        try:
+            self.restore_node(backup_dir, 'node', node, options=["--db-include=db1", '-I', 'LSN'])
+            self.fail("incremental partial restore is not allowed")
+        except ProbackupException as e:
+            self.assertIn("Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden", e.message)
+
+        node.slow_start()
+        value = node.execute('db2', 'select * from x')[0][0]
+        self.assertEqual(42, value)
+
+    def test_deny_incremental_partial_restore_exclude_tablespace_checksum(self):
+        """
+        Do not allow partial incremental restore into non-empty PGDATA
+        because we can't limit WAL replay to a single database.
+ (case of tablespaces) + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + node_tablespace = self.get_tblspace_path(node, 'somedata') + + tbl_oid = node.safe_psql( + 'postgres', + "SELECT oid " + "FROM pg_tablespace " + "WHERE spcname = 'somedata'").rstrip() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0} tablespace somedata'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + # node2 + node2 = self.make_simple_node('node2') + node2.cleanup() + node2_tablespace = self.get_tblspace_path(node2, 'somedata') + + # in node2 restore full backup + self.restore_node( + backup_dir, 'node', + node2, options=[ + "-T", f"{node_tablespace}={node2_tablespace}"]) + + # partial incremental restore into node2 + try: + self.restore_node(backup_dir, 'node', node2, + options=["-I", "checksum", + "--db-exclude=db1", + "--db-exclude=db5", + "-T", f"{node_tablespace}={node2_tablespace}"]) + self.fail("remapped tablespace contain old data") + except ProbackupException as e: + pass + + try: + self.restore_node(backup_dir, 'node', node2, + options=[ + "-I", "checksum", "--force", + "--db-exclude=db1", "--db-exclude=db5", + "-T", f"{node_tablespace}={node2_tablespace}"]) + self.fail("incremental partial restore is not allowed") + except ProbackupException as e: + self.assertIn("Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden", e.message) + def test_incremental_pg_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 @@ -2298,3 +2427,206 @@ def test_incremental_pg_filenode_map(self): 'select 1') # check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn + + # @unittest.skip("skip") + def test_incr_restore_issue_313(self): + """ + Check that failed incremental restore can be restarted + """ + self._check_gdb_flag_or_skip_test + node = self.make_simple_node('node', + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale = 50) + + full_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='full') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + last_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + self.restore_node(backup_dir, 'node', node, backup_id=full_backup_id) + + count = 0 + filelist = self.get_backup_filelist(backup_dir, 'node', last_backup_id) + for file in filelist: + # count only nondata 
files
+            if int(filelist[file]['is_datafile']) == 0 and \
+                    not stat.S_ISDIR(int(filelist[file]['mode'])) and \
+                    not filelist[file]['size'] == '0' and \
+                    file != 'database_map':
+                count += 1
+
+        gdb = self.restore_node(backup_dir, 'node', node, gdb=True,
+            backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum'])
+        gdb.verbose = False
+        gdb.set_breakpoint('restore_non_data_file')
+        gdb.run_until_break()
+        gdb.continue_execution_until_break(count - 1)
+        gdb.quit()
+
+        bak_file = os.path.join(node.data_dir, 'global', 'pg_control.pbk.bak')
+        self.assertTrue(
+            os.path.exists(bak_file),
+            "pg_control bak file should exist: {0}".format(bak_file))
+
+        try:
+            node.slow_start()
+            # we should die here because exception is what we expect to happen
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because backup is not fully restored")
+        except StartNodeException as e:
+            self.assertIn(
+                'Cannot start node',
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f:
+            if self.pg_config_version >= 120000:
+                self.assertIn(
+                    "PANIC: could not read file \"global/pg_control\"",
+                    f.read())
+            else:
+                self.assertIn(
+                    "PANIC: could not read from control file",
+                    f.read())
+        self.restore_node(backup_dir, 'node', node,
+            backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum'])
+        node.slow_start()
+        self.compare_pgdata(pgdata, self.pgdata_content(node.data_dir))
+
+    # @unittest.skip("skip")
+    def test_skip_pages_at_non_zero_segment_checksum(self):
+        if self.remote:
+            self.skipTest("Skipped because this test doesn't work properly in remote mode yet")
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_log_hints': 'on'})
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # create table of size > 1 GB, so it will have several segments
+        node.safe_psql(
+            'postgres',
+            "create table t as select i as a, i*2 as b, i*3 as c, i*4 as d, i*5 as e "
+            "from generate_series(1,20600000) i; "
+            "CHECKPOINT ")
+
+        filepath = node.safe_psql(
+            'postgres',
+            "SELECT pg_relation_filepath('t')"
+        ).decode('utf-8').rstrip()
+
+        # segment .1 must exist in order to proceed with this test
+        self.assertTrue(os.path.exists(f'{os.path.join(node.data_dir, filepath)}.1'))
+
+        # do full backup
+        self.backup_node(backup_dir, 'node', node)
+
+        node.safe_psql(
+            'postgres',
+            "DELETE FROM t WHERE a < 101; "
+            "CHECKPOINT")
+
+        # do incremental backup
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        pgdata = self.pgdata_content(node.data_dir)
+
+        node.safe_psql(
+            'postgres',
+            "DELETE FROM t WHERE a < 201; "
+            "CHECKPOINT")
+
+        node.stop()
+
+        self.restore_node(
+            backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=checksum", "--log-level-console=INFO"])
+
+        self.assertNotIn('WARNING: Corruption detected in file', self.output,
+            'Incremental restore copied pages from .1 datafile segment that were not changed')
+
+        pgdata_restored = self.pgdata_content(node.data_dir)
+        self.compare_pgdata(pgdata, pgdata_restored)
+
+    # @unittest.skip("skip")
+    def test_skip_pages_at_non_zero_segment_lsn(self):
+        if self.remote:
+            self.skipTest("Skipped because this test doesn't work properly in
remote mode yet") + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # create table of size > 1 GB, so it will have several segments + node.safe_psql( + 'postgres', + "create table t as select i as a, i*2 as b, i*3 as c, i*4 as d, i*5 as e " + "from generate_series(1,20600000) i; " + "CHECKPOINT ") + + filepath = node.safe_psql( + 'postgres', + "SELECT pg_relation_filepath('t')" + ).decode('utf-8').rstrip() + + # segment .1 must exist in order to proceed this test + self.assertTrue(os.path.exists(f'{os.path.join(node.data_dir, filepath)}.1')) + + # do full backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 101; " + "CHECKPOINT") + + # do incremental backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 201; " + "CHECKPOINT") + + node.stop() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn", "--log-level-console=INFO"]) + + self.assertNotIn('WARNING: Corruption detected in file', self.output, + 'Incremental restore copied pages from .1 datafile segment that were not changed') + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/init_test.py b/tests/init_test.py index 94b076fef..4e000c78f 100644 --- a/tests/init_test.py +++ b/tests/init_test.py @@ -56,7 +56,8 @@ def test_success(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)", + "ERROR: No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\ncommand line option --pgdata (-D)", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) diff --git a/tests/merge_test.py b/tests/merge_test.py index c789298fd..1d40af7f7 100644 --- a/tests/merge_test.py +++ b/tests/merge_test.py @@ -2734,5 +2734,48 @@ def test_merge_pg_filenode_map(self): 'postgres', 'select 1') + def test_unfinished_merge(self): + """ Test when parent has unfinished merge with a different backup. 
""" + self._check_gdb_flag_or_skip_test() + cases = [('fail_merged', 'write_backup_filelist', ['MERGED', 'MERGING', 'OK']), + ('fail_merging', 'pgBackupWriteControl', ['MERGING', 'OK', 'OK'])] + + for name, terminate_at, states in cases: + node_name = 'node_' + name + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, name) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, node_name), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, node_name, node) + self.set_archiving(backup_dir, node_name, node) + node.slow_start() + + full_id=self.backup_node(backup_dir, node_name, node, options=['--stream']) + + backup_id = self.backup_node(backup_dir, node_name, node, backup_type='delta') + second_backup_id = self.backup_node(backup_dir, node_name, node, backup_type='delta') + + gdb = self.merge_backup(backup_dir, node_name, backup_id, gdb=True) + gdb.set_breakpoint(terminate_at) + gdb.run_until_break() + + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + print(self.show_pb(backup_dir, node_name, as_json=False, as_text=True)) + + backup_infos = self.show_pb(backup_dir, node_name) + self.assertEqual(len(backup_infos), len(states)) + for expected, real in zip(states, backup_infos): + self.assertEqual(expected, real['status']) + + with self.assertRaisesRegex(ProbackupException, + f"Full backup {full_id} has unfinished merge with backup {backup_id}"): + self.merge_backup(backup_dir, node_name, second_backup_id, gdb=False) + # 1. Need new test with corrupted FULL backup # 2. different compression levels diff --git a/tests/option_test.py b/tests/option_test.py index eec1bab44..d1e8cb3a6 100644 --- a/tests/option_test.py +++ b/tests/option_test.py @@ -3,7 +3,6 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import locale - class OptionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -16,15 +15,6 @@ def test_help_1(self): help_out.read().decode("utf-8") ) - # @unittest.skip("skip") - def test_version_2(self): - """help options""" - with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out: - self.assertIn( - version_out.read().decode("utf-8").strip(), - self.run_pb(["--version"]) - ) - # @unittest.skip("skip") def test_without_backup_path_3(self): """backup command failure without backup mode option""" @@ -34,7 +24,9 @@ def test_without_backup_path_3(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: BACKUP_PATH (-B, --backup-path)', + 'ERROR: No backup catalog path specified.\n' + \ + 'Please specify it either using environment variable BACKUP_PATH or\n' + \ + 'command line option --backup-path (-B)', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -55,7 +47,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: --instance', + 'ERROR: Required parameter not specified: --instance', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -66,7 +58,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)', + 'ERROR: No backup mode specified.\nPlease specify it either using 
environment variable BACKUP_MODE or\ncommand line option --backup-mode (-b)', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -77,7 +69,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: invalid backup-mode "bad"', + 'ERROR: Invalid backup-mode "bad"', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -103,7 +95,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "option requires an argument -- 'i'", + "Option '-i' requires an argument", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -115,13 +107,8 @@ def test_options_5(self): base_dir=os.path.join(self.module_name, self.fname, 'node')) output = self.init_pb(backup_dir) - self.assertIn( - "INFO: Backup catalog", - output) + self.assertIn(f"INFO: Backup catalog '{backup_dir}' successfully initialized", output) - self.assertIn( - "successfully inited", - output) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -220,12 +207,47 @@ def test_options_5(self): def test_help_6(self): """help options""" if ProbackupTest.enable_nls: - self.test_env['LC_ALL'] = 'ru_RU.utf-8' - with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: - self.assertEqual( - self.run_pb(["--help"]), - help_out.read().decode("utf-8") - ) + if check_locale('ru_RU.utf-8'): + self.test_env['LC_ALL'] = 'ru_RU.utf-8' + with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) + else: + self.skipTest( + "Locale ru_RU.utf-8 doesn't work. 
You need to install the ru_RU.utf-8 locale for this test")
         else:
             self.skipTest(
                 'You need configure PostgreSQL with --enabled-nls option for this test')
+
+    # @unittest.skip("skip")
+    def test_options_no_scale_units(self):
+        """check --no-scale-units option"""
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'))
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        # check that --no-scale-units option works correctly
+        output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node"])
+        self.assertIn(container=output, member="archive-timeout = 5min")
+        output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--no-scale-units"])
+        self.assertIn(container=output, member="archive-timeout = 300")
+        self.assertNotIn(container=output, member="archive-timeout = 300s")
+        # check that there are no quotes ("") around numeric values in the json output
+        output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--no-scale-units", "--format=json"])
+        self.assertIn(container=output, member='"archive-timeout": 300,')
+        self.assertIn(container=output, member='"retention-redundancy": 0,')
+        self.assertNotIn(container=output, member='"archive-timeout": "300",')
+
+def check_locale(locale_name):
+    ret = True
+    old_locale = locale.setlocale(locale.LC_CTYPE, "")
+    try:
+        locale.setlocale(locale.LC_CTYPE, locale_name)
+    except locale.Error:
+        ret = False
+    finally:
+        locale.setlocale(locale.LC_CTYPE, old_locale)
+    return ret
diff --git a/tests/page_test.py b/tests/page_test.py
index 786374bdb..a66d6d413 100644
--- a/tests/page_test.py
+++ b/tests/page_test.py
@@ -6,6 +6,7 @@
 import subprocess
 import gzip
 import shutil
+import time
 
 class PageTest(ProbackupTest, unittest.TestCase):
 
@@ -893,7 +894,7 @@ def test_page_backup_with_alien_wal_segment(self):
             "create table t_heap as select i as id, "
             "md5(i::text) as text, "
             "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,1000) i;")
+            "from generate_series(0,10000) i;")
 
         alien_node.safe_psql(
             "postgres",
@@ -905,7 +906,7 @@ def test_page_backup_with_alien_wal_segment(self):
             "create table t_heap_alien as select i as id, "
             "md5(i::text) as text, "
             "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,100000) i;")
+            "from generate_series(0,10000) i;")
 
         # copy latest wal segment
         wals_dir = os.path.join(backup_dir, 'wal', 'alien_node')
@@ -916,9 +917,9 @@ def test_page_backup_with_alien_wal_segment(self):
             file = os.path.join(wals_dir, filename)
             file_destination = os.path.join(
                 os.path.join(backup_dir, 'wal', 'node'), filename)
-#        file = os.path.join(wals_dir, '000000010000000000000004')
-            print(file)
-            print(file_destination)
+            start = time.time()
+            while not os.path.exists(file_destination) and time.time() - start < 20:
+                time.sleep(0.1)
 
         os.remove(file_destination)
         os.rename(file, file_destination)
@@ -1414,3 +1415,50 @@ def test_page_pg_resetxlog(self):
 #
 #        pgdata_restored = self.pgdata_content(node_restored.data_dir)
 #        self.compare_pgdata(pgdata, pgdata_restored)
+
+    def test_page_huge_xlog_record(self):
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'max_locks_per_transaction': '1000',
+                'work_mem': '100MB',
+                'temp_buffers': '100MB',
diff --git a/tests/page_test.py b/tests/page_test.py
index 786374bdb..a66d6d413 100644
--- a/tests/page_test.py
+++ b/tests/page_test.py
@@ -6,6 +6,7 @@
 import subprocess
 import gzip
 import shutil
+import time
 
 class PageTest(ProbackupTest, unittest.TestCase):
 
@@ -893,7 +894,7 @@ def test_page_backup_with_alien_wal_segment(self):
             "create table t_heap as select i as id, "
             "md5(i::text) as text, "
             "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,1000) i;")
+            "from generate_series(0,10000) i;")
 
         alien_node.safe_psql(
             "postgres",
@@ -905,7 +906,7 @@ def test_page_backup_with_alien_wal_segment(self):
             "create table t_heap_alien as select i as id, "
             "md5(i::text) as text, "
             "md5(repeat(i::text,10))::tsvector as tsvector "
-            "from generate_series(0,100000) i;")
+            "from generate_series(0,10000) i;")
 
         # copy latest wal segment
         wals_dir = os.path.join(backup_dir, 'wal', 'alien_node')
@@ -916,9 +917,9 @@ def test_page_backup_with_alien_wal_segment(self):
             file = os.path.join(wals_dir, filename)
             file_destination = os.path.join(
                 os.path.join(backup_dir, 'wal', 'node'), filename)
-#            file = os.path.join(wals_dir, '000000010000000000000004')
-            print(file)
-            print(file_destination)
+            start = time.time()
+            while not os.path.exists(file_destination) and time.time() - start < 20:
+                time.sleep(0.1)
             os.remove(file_destination)
             os.rename(file, file_destination)
 
@@ -1414,3 +1415,50 @@ def test_page_pg_resetxlog(self):
 #
 #        pgdata_restored = self.pgdata_content(node_restored.data_dir)
 #        self.compare_pgdata(pgdata, pgdata_restored)
+
+    def test_page_huge_xlog_record(self):
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={
+                'max_locks_per_transaction': '1000',
+                'work_mem': '100MB',
+                'temp_buffers': '100MB',
+                'wal_buffers': '128MB',
+                'wal_level': 'logical',
+            })
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        node.pgbench_init(scale=3)
+
+        # Do full backup
+        self.backup_node(backup_dir, 'node', node, backup_type='full')
+        show_backup = self.show_pb(backup_dir, 'node')[0]
+
+        self.assertEqual(show_backup['status'], "OK")
+        self.assertEqual(show_backup['backup-mode'], "FULL")
+
+        # Originally a client hit the problem in a transaction that (supposedly)
+        # dropped a lot of temporary tables (probably on client disconnect).
+        # It generated a ~40MB COMMIT WAL record.
+        #
+        # `pg_logical_emit_message` is a much simpler and faster way to
+        # generate such a huge record.
+        node.safe_psql(
+            "postgres",
+            "select pg_logical_emit_message(False, 'z', repeat('o', 60*1000*1000))")
+
+        # Do page backup
+        self.backup_node(backup_dir, 'node', node, backup_type='page')
+
+        show_backup = self.show_pb(backup_dir, 'node')[1]
+        self.assertEqual(show_backup['status'], "OK")
+        self.assertEqual(show_backup['backup-mode'], "PAGE")
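The trick in `test_page_huge_xlog_record` is reusable on its own: `pg_logical_emit_message()` writes a single logical WAL message of arbitrary size without creating any relation. A minimal sketch, assuming testgres is installed and its `get_new_node`/`append_conf` API behaves as in recent releases; the payload matches the test above:

```python
import testgres

# a throwaway node; wal_level=logical mirrors the test's configuration
with testgres.get_new_node() as node:
    node.init(initdb_params=['--data-checksums'])
    node.append_conf('wal_level = logical')
    node.start()
    # emit one ~60 MB WAL record, no tables involved
    node.safe_psql(
        'postgres',
        "select pg_logical_emit_message(false, 'z', repeat('o', 60*1000*1000))")
```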
diff --git a/tests/requirements.txt b/tests/requirements.txt
new file mode 100644
index 000000000..e2ac18bea
--- /dev/null
+++ b/tests/requirements.txt
@@ -0,0 +1,13 @@
+# Testgres can be installed in the following ways:
+# 1. From a pip package (recommended)
+#    testgres==1.8.5
+# 2. From a specific Git branch, tag or commit
+#    git+https://github.com/postgrespro/testgres.git@
+# 3. From a local directory
+#    /path/to/local/directory/testgres
+git+https://github.com/postgrespro/testgres.git@archive-command-exec#egg=testgres-pg_probackup2&subdirectory=testgres/plugins/pg_probackup2
+allure-pytest
+deprecation
+pexpect
+pytest==7.4.3
+pytest-xdist
diff --git a/tests/restore_test.py b/tests/restore_test.py
index da3ebffb4..b6664252e 100644
--- a/tests/restore_test.py
+++ b/tests/restore_test.py
@@ -3,11 +3,11 @@
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
 import subprocess
 import sys
-from time import sleep
 from datetime import datetime, timedelta, timezone
 import hashlib
 import shutil
 import json
+import stat
 from shutil import copyfile
 from testgres import QueryException, StartNodeException
 from stat import S_ISDIR
@@ -1916,7 +1916,9 @@ def test_restore_target_immediate_archive(self):
         with open(recovery_conf, 'r') as f:
             self.assertIn("recovery_target = 'immediate'", f.read())
 
-    # @unittest.skip("skip")
+    # Skipped because the default recovery_target_timeline is 'current'.
+    # Before PBCKP-598 the --recovery-target=latest option did not work and this test always passed.
+    @unittest.skip("skip")
     def test_restore_target_latest_archive(self):
         """
         make sure that recovery_target 'latest'
@@ -2995,7 +2997,7 @@ def test_empty_and_mangled_database_map(self):
                     self.output, self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                'ERROR: field "dbOid" is not found in the line 42 of '
+                'ERROR: Field "dbOid" is not found in the line 42 of '
                 'the file backup_content.control',
                 e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
@@ -3011,7 +3013,7 @@ def test_empty_and_mangled_database_map(self):
                     self.output, self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                'ERROR: field "dbOid" is not found in the line 42 of '
+                'ERROR: Field "dbOid" is not found in the line 42 of '
                 'the file backup_content.control',
                 e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
@@ -3707,66 +3709,6 @@ def test_concurrent_restore(self):
 
         self.compare_pgdata(pgdata1, pgdata2)
         self.compare_pgdata(pgdata2, pgdata3)
 
-    # skip this test until https://github.com/postgrespro/pg_probackup/pull/399
-    @unittest.skip("skip")
-    def test_restore_issue_313(self):
-        """
-        Check that partially restored PostgreSQL instance cannot be started
-        """
-        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
-        node = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            set_replication=True,
-            initdb_params=['--data-checksums'])
-
-        self.init_pb(backup_dir)
-        self.add_instance(backup_dir, 'node', node)
-        self.set_archiving(backup_dir, 'node', node)
-        node.slow_start()
-
-        # FULL backup
-        backup_id = self.backup_node(backup_dir, 'node', node)
-        node.cleanup()
-
-        count = 0
-        filelist = self.get_backup_filelist(backup_dir, 'node', backup_id)
-        for file in filelist:
-            # count only nondata files
-            if int(filelist[file]['is_datafile']) == 0 and int(filelist[file]['size']) > 0:
-                count += 1
-
-        node_restored = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
-        node_restored.cleanup()
-        self.restore_node(backup_dir, 'node', node_restored)
-
-        gdb = self.restore_node(backup_dir, 'node', node, gdb=True, options=['--progress'])
-        gdb.verbose = False
-        gdb.set_breakpoint('restore_non_data_file')
-        gdb.run_until_break()
-        gdb.continue_execution_until_break(count - 2)
-        gdb.quit()
-
-        # emulate the user or HA taking care of PG configuration
-        for fname in os.listdir(node_restored.data_dir):
-            if fname.endswith('.conf'):
-                os.rename(
-                    os.path.join(node_restored.data_dir, fname),
-                    os.path.join(node.data_dir, fname))
-
-        try:
-            node.slow_start()
-            # we should die here because exception is what we expect to happen
-            self.assertEqual(
-                1, 0,
-                "Expecting Error because backup is not fully restored")
-        except StartNodeException as e:
-            self.assertIn(
-                'Cannot start node',
-                e.message,
-                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd))
-
     # @unittest.skip("skip")
     def test_restore_with_waldir(self):
         """recovery using tablespace-mapping option and page backup"""
@@ -3818,3 +3760,173 @@ def test_restore_with_waldir(self):
 
         wal_path=os.path.join(node.data_dir, "pg_xlog")
 
         self.assertEqual(os.path.islink(wal_path), True)
+
+    # @unittest.skip("skip")
+    def test_restore_to_latest_timeline(self):
+        """recovery to latest timeline"""
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+        node.pgbench_init(scale=2)
+
+        before1 = node.table_checksum("pgbench_branches")
+        backup_id = self.backup_node(backup_dir, 'node', node)
+
+        node.stop()
+        node.cleanup()
+
+        self.assertIn(
+            "INFO: Restore of backup {0} completed.".format(backup_id),
+            self.restore_node(
+                backup_dir, 'node', node, options=["-j", "4"]),
+            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                repr(self.output), self.cmd))
+
+        node.slow_start()
+        pgbench = node.pgbench(
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+            options=['-T', '10', '-c', '2', '--no-vacuum'])
+        pgbench.wait()
+        pgbench.stdout.close()
+
+        before2 = node.table_checksum("pgbench_branches")
+        self.backup_node(backup_dir, 'node', node)
+
+        node.stop()
+        node.cleanup()
+
+        # restore from first backup
+        restore_result = self.restore_node(backup_dir, 'node', node,
+            options=[
+                "-j", "4", "--recovery-target-timeline=latest", "-i", backup_id]
+        )
+        self.assertIn(
+            "INFO: Restore of backup {0} completed.".format(backup_id), restore_result,
+            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                repr(self.output), self.cmd))
+
+        # check the recovery_target_timeline option in the recovery conf
+        recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"]
+        self.assertEqual(recovery_target_timeline, "latest")
+
+        # check the --recovery-target=latest option for compatibility with previous versions
+        node.cleanup()
+        restore_result = self.restore_node(backup_dir, 'node', node,
+            options=[
+                "-j", "4", "--recovery-target=latest", "-i", backup_id]
+        )
+        self.assertIn(
+            "INFO: Restore of backup {0} completed.".format(backup_id), restore_result,
+            '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                repr(self.output), self.cmd))
+
+        # check the recovery_target_timeline option in the recovery conf
+        recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"]
+        self.assertEqual(recovery_target_timeline, "latest")
+
+        # start postgres and promote wal files to the latest timeline
+        node.slow_start()
+
+        # check for the latest updates
+        after = node.table_checksum("pgbench_branches")
+        self.assertEqual(before2, after)
+
+        # check that recovery_target_timeline=current is the default
+        if self.pg_config_version >= self.version_to_num('12.0'):
+            node.stop()
+            node.cleanup()
+
+            # restore from first backup
+            restore_result = self.restore_node(backup_dir, 'node', node,
+                options=[
+                    "-j", "4", "-i", backup_id]
+            )
+
+            self.assertIn(
+                "INFO: Restore of backup {0} completed.".format(backup_id), restore_result,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(self.output), self.cmd))
+
+            # check the recovery_target_timeline option in the recovery conf
+            recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"]
+            self.assertEqual(recovery_target_timeline, "current")
+
+            # start postgres with the current timeline
+            node.slow_start()
+
+            # check for the current updates
+            after = node.table_checksum("pgbench_branches")
+            self.assertEqual(before1, after)
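The assertions above depend on where restore writes its recovery settings. On PostgreSQL 12 and later they land in `postgresql.auto.conf` (older versions used `recovery.conf`), which is what the suite's `get_recovery_conf` helper reads. A rough standalone equivalent, as a sketch only: the parsing is simplified and the data directory path is up to the caller:

```python
import os
import re

def recovery_target_timeline(datadir):
    # last-one-wins scan, since PostgreSQL honors the final occurrence
    # of a duplicated setting in postgresql.auto.conf
    conf = os.path.join(datadir, 'postgresql.auto.conf')
    value = None
    with open(conf) as f:
        for line in f:
            m = re.match(r"\s*recovery_target_timeline\s*=\s*'?([^'\s]+)'?", line)
            if m:
                value = m.group(1)
    return value

# after `pg_probackup restore ... --recovery-target-timeline=latest`
# this is expected to return 'latest'; with no target option, 'current'
```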
+
+    def test_restore_issue_313(self):
+        """
+        Check that partially restored PostgreSQL instance cannot be started
+        """
+        self._check_gdb_flag_or_skip_test()
+        node = self.make_simple_node('node',
+            set_replication=True,
+            initdb_params=['--data-checksums'])
+
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # FULL backup
+        backup_id = self.backup_node(backup_dir, 'node', node)
+        node.cleanup()
+
+        count = 0
+        filelist = self.get_backup_filelist(backup_dir, 'node', backup_id)
+        for file in filelist:
+            # count only nondata files
+            if int(filelist[file]['is_datafile']) == 0 and \
+                    not stat.S_ISDIR(int(filelist[file]['mode'])) and \
+                    not filelist[file]['size'] == '0' and \
+                    file != 'database_map':
+                count += 1
+
+        node_restored = self.make_simple_node('node_restored')
+        node_restored.cleanup()
+        self.restore_node(backup_dir, 'node', node_restored)
+
+        gdb = self.restore_node(backup_dir, 'node', node, gdb=True, options=['--progress'])
+        gdb.verbose = False
+        gdb.set_breakpoint('restore_non_data_file')
+        gdb.run_until_break()
+        gdb.continue_execution_until_break(count - 1)
+        gdb.quit()
+
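The file-counting filter above is the crux of the rewritten test: the gdb breakpoint counter has to match exactly the set of files that `restore_non_data_file` processes. Factored out as a plain function, a sketch only: `filelist` entries are dicts shaped like those returned by the suite's `get_backup_filelist` helper, where `size` is a string:

```python
import stat

def count_restorable_non_data_files(filelist):
    """Non-data, non-directory, non-empty entries, excluding database_map."""
    count = 0
    for name, entry in filelist.items():
        if int(entry['is_datafile']) == 0 \
                and not stat.S_ISDIR(int(entry['mode'])) \
                and entry['size'] != '0' \
                and name != 'database_map':
            count += 1
    return count
```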
+        # emulate the user or HA taking care of PG configuration
+        for fname in os.listdir(node_restored.data_dir):
+            if fname.endswith('.conf'):
+                os.rename(
+                    os.path.join(node_restored.data_dir, fname),
+                    os.path.join(node.data_dir, fname))
+
+        try:
+            node.slow_start()
+            # the node must fail to start here: an exception is what we expect
+            self.assertEqual(
+                1, 0,
+                "Expecting Error because backup is not fully restored")
+        except StartNodeException as e:
+            self.assertIn(
+                'Cannot start node',
+                e.message,
+                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
+                    repr(e.message), self.cmd))
+
+        with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f:
+            if self.pg_config_version >= 120000:
+                self.assertIn(
+                    "PANIC: could not read file \"global/pg_control\"",
+                    f.read())
+            else:
+                self.assertIn(
+                    "PANIC: could not read from control file",
+                    f.read())
diff --git a/tests/set_backup_test.py b/tests/set_backup_test.py
index e789d174a..31334cfba 100644
--- a/tests/set_backup_test.py
+++ b/tests/set_backup_test.py
@@ -41,7 +41,7 @@ def test_set_backup_sanity(self):
                 repr(self.output), self.cmd))
         except ProbackupException as e:
             self.assertIn(
-                'ERROR: required parameter not specified: --instance',
+                'ERROR: Required parameter not specified: --instance',
                 e.message,
                 "\n Unexpected Error Message: {0}\n CMD: {1}".format(
                     repr(e.message), self.cmd))
diff --git a/tests/show_test.py b/tests/show_test.py
index c4b96499d..27b6fab96 100644
--- a/tests/show_test.py
+++ b/tests/show_test.py
@@ -507,3 +507,39 @@ def test_color_with_no_terminal(self):
                 '[0m', e.message,
                 '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
                     repr(e.message), self.cmd))
+
+    # @unittest.skip("skip")
+    def test_tablespace_print_issue_431(self):
+        backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
+            initdb_params=['--data-checksums'])
+
+        self.init_pb(backup_dir)
+        self.add_instance(backup_dir, 'node', node)
+        self.set_archiving(backup_dir, 'node', node)
+        node.slow_start()
+
+        # Create tablespace
+        tblspc_path = os.path.join(node.base_dir, "tblspc")
+        os.makedirs(tblspc_path)
+        with node.connect("postgres") as con:
+            con.connection.autocommit = True
+            con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path)
+            con.connection.autocommit = False
+            con.execute("CREATE TABLE test (id int) TABLESPACE tblspc")
+            con.execute("INSERT INTO test VALUES (1)")
+            con.commit()
+
+        full_backup_id = self.backup_node(backup_dir, 'node', node)
+        self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True))
+        # Check that tablespace info exists. JSON
+        self.assertIn("tablespace_map", self.show_pb(backup_dir, 'node', as_text=True))
+        self.assertIn("oid", self.show_pb(backup_dir, 'node', as_text=True))
+        self.assertIn("path", self.show_pb(backup_dir, 'node', as_text=True))
+        self.assertIn(tblspc_path, self.show_pb(backup_dir, 'node', as_text=True))
+        # Check that tablespace info exists. PLAIN
+        self.assertIn("tablespace_map", self.show_pb(backup_dir, 'node', backup_id=full_backup_id, as_text=True, as_json=False))
+        self.assertIn(tblspc_path, self.show_pb(backup_dir, 'node', backup_id=full_backup_id, as_text=True, as_json=False))
+        # Check that tablespace info is NOT printed when no backup id is provided. PLAIN
+        self.assertNotIn("tablespace_map", self.show_pb(backup_dir, 'node', as_text=True, as_json=False))
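A standalone version of the JSON half of this check, assuming `pg_probackup` is on `PATH` and the catalog paths are hypothetical; the test above only asserts that `tablespace_map` entries with their `oid` and `path` appear in the output, so this sketch checks the same substrings rather than a particular JSON layout:

```python
import subprocess

def show_json(backup_dir, instance):
    # `pg_probackup show --format=json` prints the catalog as JSON text
    return subprocess.check_output(
        ["pg_probackup", "show", "--backup-path", backup_dir,
         "--instance", instance, "--format=json"], text=True)

output = show_json("/tmp/backup", "node")  # hypothetical catalog/instance
for key in ("tablespace_map", "oid", "path"):
    assert key in output
```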