diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..c3ad89568 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,93 @@ +name: Build Probackup + +on: + push: + branches: + - "**" + # Runs triggered by pull requests are disabled to prevent executing potentially unsafe code from public pull requests + # pull_request: + # branches: + # - main + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + + build-win2019: + + runs-on: + - windows-2019 + + env: + zlib_dir: C:\dep\zlib + + steps: + + - uses: actions/checkout@v2 + + - name: Install pacman packages + run: | + $env:PATH += ";C:\msys64\usr\bin" + pacman -S --noconfirm --needed bison flex + + - name: Make zlib + run: | + git clone -b v1.2.11 --depth 1 https://github.com/madler/zlib.git + cd zlib + cmake -DCMAKE_INSTALL_PREFIX:PATH=C:\dep\zlib -G "Visual Studio 16 2019" . + cmake --build . --config Release --target ALL_BUILD + cmake --build . --config Release --target INSTALL + copy C:\dep\zlib\lib\zlibstatic.lib C:\dep\zlib\lib\zdll.lib + copy C:\dep\zlib\bin\zlib.dll C:\dep\zlib\lib + + - name: Get Postgres sources + run: git clone -b REL_14_STABLE https://github.com/postgres/postgres.git + + # Copy ptrack to contrib to build the ptrack extension + # Convert line breaks in the patch file to LF otherwise the patch doesn't apply + - name: Get Ptrack sources + run: | + git clone -b master --depth 1 https://github.com/postgrespro/ptrack.git + Copy-Item -Path ptrack -Destination postgres\contrib -Recurse + (Get-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Raw).Replace("`r`n","`n") | Set-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Force -NoNewline + cd postgres + git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff + + - name: Build Postgres + run: | + $env:PATH += ";C:\msys64\usr\bin" + cd postgres\src\tools\msvc + (Get-Content config_default.pl) -Replace "zlib *=>(.*?)(?=,? 
*#)", "zlib => '${{ env.zlib_dir }}'" | Set-Content config.pl + cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && .\build.bat" + + - name: Build Probackup + run: cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && perl .\gen_probackup_project.pl `"${{ github.workspace }}`"\postgres" + + - name: Install Postgres + run: | + cd postgres + src\tools\msvc\install.bat postgres_install + + - name: Install Testgres + run: | + git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git + pip3 install psycopg2 ./testgres + + # Grant the Github runner user full control of the workspace for initdb to successfully process the data folder + - name: Test Probackup + run: | + icacls.exe "${{ github.workspace }}" /grant "${env:USERNAME}:(OI)(CI)F" + $env:PATH += ";${{ github.workspace }}\postgres\postgres_install\lib;${{ env.zlib_dir }}\lib" + $Env:LC_MESSAGES = "English" + $Env:PG_CONFIG = "${{ github.workspace }}\postgres\postgres_install\bin\pg_config.exe" + $Env:PGPROBACKUPBIN = "${{ github.workspace }}\postgres\Release\pg_probackup\pg_probackup.exe" + $Env:PG_PROBACKUP_PTRACK = "ON" + If (!$Env:MODE -Or $Env:MODE -Eq "basic") { + $Env:PG_PROBACKUP_TEST_BASIC = "ON" + python -m unittest -v tests + python -m unittest -v tests.init_test + } else { + python -m unittest -v tests.$Env:MODE + } + diff --git a/.gitignore b/.gitignore index c0b4de331..97d323ceb 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,9 @@ # Binaries /pg_probackup +# Generated translated file +/po/ru.mo + # Generated by test suite /regression.diffs /regression.out @@ -50,7 +53,6 @@ /docker-compose.yml /Dockerfile /Dockerfile.in -/run_tests.sh /make_dockerfile.sh /backup_restore.sh diff --git a/.travis.yml b/.travis.yml index 876289e82..074ae3d02 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,23 +1,60 @@ os: linux -dist: bionic +dist: jammy language: c -services: - - docker +cache: ccache + +addons: + apt: + packages: + - sudo + - libc-dev + - bison + - flex + - libreadline-dev + - zlib1g-dev + - libzstd-dev + - libssl-dev + - perl + - libperl-dev + - libdbi-perl + - cpanminus + - locales + - python3 + - python3-dev + - python3-pip + - libicu-dev + - libgss-dev + - libkrb5-dev + - libxml2-dev + - libxslt1-dev + - libldap2-dev + - tcl-dev + - diffutils + - gdb + - gettext + - lcov + - openssh-client + - openssh-server + - libipc-run-perl + - libtime-hires-perl + - libtimedate-perl + - libdbd-pg-perl before_install: - - cp travis/* . + - sudo travis/before-install.sh install: - - ./make_dockerfile.sh - - docker-compose build + - travis/install.sh + +before_script: + - sudo travis/before-script.sh + - travis/before-script-user.sh script: - - docker-compose run tests - # - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests - # - docker run -v $(pwd):/tests --rm centos:7 /tests/travis/backup_restore.sh + - travis/script.sh notifications: email: @@ -26,7 +63,8 @@ notifications: # Default MODE is basic, i.e. 
all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=15 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=16 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=REL_15_STABLE - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE @@ -34,24 +72,28 @@ env: - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=archive + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup_test.BackupTest.test_full_backup + - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup_test.BackupTest.test_full_backup_stream # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=compression -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=delta -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=locking +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=checkdb +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=compression +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=delta +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=locking # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=merge -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=page +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=option +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=page # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=ptrack # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica -# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=retention +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=retention # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore +# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=time_consuming jobs: allow_failures: - if: env(PG_BRANCH) = master - if: env(PG_BRANCH) = REL9_5_STABLE -# - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) +# - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage #branches: diff --git a/LICENSE b/LICENSE index 0ba831507..66476e8a9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015-2020, Postgres Professional +Copyright (c) 2015-2023, Postgres Professional Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group diff --git a/README.md b/README.md index 060883a28..2279b97a4 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ -[![Build 
Status](https://travis-ci.com/postgrespro/pg_probackup.svg?branch=master)](https://travis-ci.com/postgrespro/pg_probackup) [![GitHub release](https://img.shields.io/github/v/release/postgrespro/pg_probackup?include_prereleases)](https://github.com/postgrespro/pg_probackup/releases/latest) +[![Build Status](https://travis-ci.com/postgrespro/pg_probackup.svg?branch=master)](https://travis-ci.com/postgrespro/pg_probackup) # pg_probackup `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14; +* PostgreSQL 11, 12, 13, 14, 15, 16 As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -41,9 +41,9 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp ## ptrack support `PTRACK` backup support provided via following options: -* vanilla PostgreSQL 11, 12, 13, 14 with [ptrack extension](https://github.com/postgrespro/ptrack) -* Postgres Pro Standard 11, 12, 13 -* Postgres Pro Enterprise 11, 12, 13 +* vanilla PostgreSQL 11, 12, 13, 14, 15, 16 with [ptrack extension](https://github.com/postgrespro/ptrack) +* Postgres Pro Standard 11, 12, 13, 14, 15, 16 +* Postgres Pro Enterprise 11, 12, 13, 14, 15, 16 ## Limitations @@ -66,120 +66,15 @@ For detailed release plans check [Milestones](https://github.com/postgrespro/pg_ ## Installation and Setup ### Windows Installation -Installers are available in release **assets**. [Latests](https://github.com/postgrespro/pg_probackup/releases/2.4.15). +Installers are available in release **assets**. [Latest](https://github.com/postgrespro/pg_probackup/releases/latest). 
### Linux Installation -#### pg_probackup for vanilla PostgreSQL -```shell -#DEB Ubuntu|Debian Packages -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg - -#DEB-SRC Packages -sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ - /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} - -#DEB Astra Linix Orel -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} - -#RPM Centos Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo - -#RPM RHEL Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo - -#RPM Oracle Linux Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo - -#SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} - -#RPM SUSE|SLES Packages -zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10,9.6} -zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo - -#SRPM SUSE|SLES Packages -zypper si pg_probackup-{14,13,12,11,10,9.6} - -#RPM ALT Linux 7 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 8 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 9 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' -sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo -``` -#### pg_probackup for PostgresPro Standard and Enterprise -```shell -#DEB Ubuntu|Debian Packages -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' -sudo wget -O - 
https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6}-dbg - -#DEB Astra Linix Orel -sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' -sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} - - -#RPM Centos Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo - -#RPM RHEL Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo - -#RPM Oracle Linux Packages -rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 7 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' -sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 8 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' -sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo - -#RPM ALT Linux 9 -sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo -``` +See the [Installation](https://postgrespro.github.io/pg_probackup/#pbk-install) section in the documentation. -Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). +Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-setup). + +For users of Postgres Pro products, commercial editions of pg_probackup are available for installation from the corresponding Postgres Pro product repository. ## Building from source ### Linux @@ -199,7 +94,7 @@ cd && git clone https://github.com/postgrespro/ ### Windows Currently pg_probackup can be build using only MSVC 2013. -Build PostgreSQL using [pgwininstall](https://github.com/postgrespro/pgwininstall) or [PostgreSQL instruction](https://www.postgresql.org/docs/10/install-windows-full.html) with MSVC 2013. +Build PostgreSQL using [pgwininstall](https://github.com/postgrespro/pgwininstall) or [PostgreSQL instruction](https://www.postgresql.org/docs/current/install-windows-full.html) with MSVC 2013. 
If zlib support is needed, src/tools/msvc/config.pl must contain path to directory with compiled zlib. [Example](https://gist.githubusercontent.com/gsmol/80989f976ce9584824ae3b1bfb00bd87/raw/240032950d4ac4801a79625dd00c8f5d4ed1180c/gistfile1.txt) ```shell @@ -224,3 +119,17 @@ Postgres Professional, Moscow, Russia. ## Credits `pg_probackup` utility is based on `pg_arman`, that was originally written by NTT and then developed and maintained by Michael Paquier. + + +### Localization files (*.po) + +Description of how to add new translation languages. +1. Add a flag --enable-nls in configure. +2. Build postgres. +3. Adding to nls.mk in folder pg_probackup required files in GETTEXT_FILES. +4. In folder pg_probackup do 'make update-po'. +5. As a result, the progname.pot file will be created. Copy the content and add it to the file with the desired language. +6. Adding to nls.mk in folder pg_probackup required language in AVAIL_LANGUAGES. + +For more information, follow the link below: +https://postgrespro.ru/docs/postgresql/12/nls-translator diff --git a/doc/Readme.md b/doc/Readme.md index 756c6aaa0..0e1d64590 100644 --- a/doc/Readme.md +++ b/doc/Readme.md @@ -3,3 +3,6 @@ xmllint --noout --valid probackup.xml xsltproc stylesheet.xsl probackup.xml >pg-probackup.html ``` +> [!NOTE] +>Install ```docbook-xsl``` if you got +>``` "xsl:import : unable to load http://docbook.sourceforge.net/release/xsl/current/xhtml/docbook.xsl"``` \ No newline at end of file diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 86063b843..10e766239 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -2,7 +2,6 @@ doc/src/sgml/pgprobackup.sgml &project; documentation --> - pg_probackup @@ -164,7 +163,7 @@ doc/src/sgml/pgprobackup.sgml recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. - pg_probackup supports PostgreSQL 9.5 or higher. + pg_probackup supports PostgreSQL 11 or higher. @@ -172,7 +171,13 @@ doc/src/sgml/pgprobackup.sgml Overview - Installation and Setup + Quick Start + + + Installation + + + Setup Command-Line Reference @@ -312,7 +317,7 @@ doc/src/sgml/pgprobackup.sgml - + FULL backups contain all the data files required to restore the database cluster. @@ -328,7 +333,7 @@ doc/src/sgml/pgprobackup.sgml - + DELTA backup. In this mode, pg_probackup reads all data files in the data directory and copies only those pages that have changed since the previous backup. This @@ -337,7 +342,7 @@ doc/src/sgml/pgprobackup.sgml - + PAGE backup. In this mode, pg_probackup scans all WAL files in the archive from the moment the previous full or incremental backup was taken. Newly created backups @@ -352,7 +357,7 @@ doc/src/sgml/pgprobackup.sgml - + PTRACK backup. In this mode, PostgreSQL tracks page changes on the fly. Continuous archiving is not necessary for it to operate. Each time a relation page is updated, @@ -413,7 +418,7 @@ doc/src/sgml/pgprobackup.sgml - On Unix systems, for PostgreSQL 10 or lower, + On Unix systems, for PostgreSQL 11, a backup can be made only by the same OS user that has started the PostgreSQL server. For example, if PostgreSQL server is started by user postgres, the backup command must also be run @@ -443,16 +448,575 @@ doc/src/sgml/pgprobackup.sgml parameters and have the same major release number. Depending on cluster configuration, PostgreSQL itself may apply additional restrictions, such as CPU architecture - or libc/libicu versions. + or libc/icu versions. 
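For example, before restoring onto a different installation you can compare the server version recorded for a backup with the version of the target binaries (an illustrative check; the backup catalog path and instance name follow the Quick Start example below):

pg_probackup-16 show -B /mnt/backups --instance=node   # the Version column reports the PostgreSQL major version of each backup
pg_config --version                                    # version of the installation you plan to restore onto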
- - - Codestin Search App + + Codestin Search App + + To quickly get started with pg_probackup, complete the steps below. This will set up FULL and DELTA backups in the remote mode and demonstrate some + basic pg_probackup operations. In the following, these terms are used: + + + + + backupPostgreSQL + role used to connect to the PostgreSQL + cluster. + + + + + backupdb — database used to connect to the + PostgreSQL cluster. + + + + + backup_host — host with the backup catalog. + + + + + backup_user — user on + backup_host running all pg_probackup + operations. + + + + + /mnt/backups — directory on + backup_host where the backup catalog is stored. + + + + + postgres_host — host with the + PostgreSQL cluster. + + + + + postgres — user on + postgres_host under which + PostgreSQL cluster processes are running. + + + + + /var/lib/postgresql/16/main — + PostgreSQL data directory on + postgres_host. + + + + + Codestin Search App + + + Install pg_probackup on both backup_host and postgres_host. + + + Set up an SSH connection from backup_host to postgres_host. + + + Configure your database cluster for STREAM backups. + + + Initialize the backup catalog: + +backup_user@backup_host:~$ pg_probackup-16 init -B /mnt/backups +INFO: Backup catalog '/mnt/backups' successfully initialized + + + + Add a backup instance called mydb to the backup catalog: + +backup_user@backup_host:~$ pg_probackup-16 add-instance \ + -B /mnt/backups \ + -D /var/lib/pgpro/std-16/data \ + --instance=node \ + --remote-host=postgres_host \ + --remote-user=postgres +INFO: Instance 'node' successfully initialized + + + + Make a FULL backup: + +backup_user@backup_host:~$ pg_probackup-16 backup \ + -B /mnt/backups \ + -b FULL \ + --instance=node \ + --stream \ + --compress-algorithm=zlib \ + --remote-host=postgres_host \ + --remote-user=postgres \ + -U backup \ + -d backupdb +INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN1Q, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected +INFO: Database backup start +INFO: wait for pg_backup_start() +INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN1Q/database/pg_wal/000000010000000000000008 to be streamed +INFO: PGDATA size: 96MB +INFO: Current Start LSN: 0/8000028, TLI: 1 +INFO: Start transferring data files +INFO: Data files are transferred, time elapsed: 1s +INFO: wait for pg_stop_backup() +INFO: pg_stop backup() successfully executed +INFO: stop_lsn: 0/800BBD0 +INFO: Getting the Recovery Time from WAL +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 1s +INFO: Validating backup SCUN1Q +INFO: Backup SCUN1Q data files are valid +INFO: Backup SCUN1Q resident size: 56MB +INFO: Backup SCUN1Q completed + + + + List the backups of the instance: + +backup_user@backup_host:~$ pg_probackup-16 show \ + -B /mnt/backups \ + --instance=node +================================================================================================================================ + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +================================================================================================================================ + node 16 SCUN1Q 2024-05-02 11:17:53+03 FULL STREAM 1/0 12s 40MB 16MB 2.42 0/8000028 0/800BBD0 OK + + + + Make an incremental backup in the DELTA mode: + +backup_user@backup_host:~$ pg_probackup-16 backup \ + -B /mnt/backups \ + -b DELTA \ + --instance=node \ + --stream \ + --compress-algorithm=zlib \ + --remote-host=postgres_host \ + --remote-user=postgres \ + -U backup \ + -d backupdb +INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN22, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected +INFO: Database backup start +INFO: wait for pg_backup_start() +INFO: Parent backup: SCUN1Q +INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN22/database/pg_wal/000000010000000000000009 to be streamed +INFO: PGDATA size: 96MB +INFO: Current Start LSN: 0/9000028, TLI: 1 +INFO: Parent Start LSN: 0/8000028, TLI: 1 +INFO: Start transferring data files +INFO: Data files are transferred, time elapsed: 1s +INFO: wait for pg_stop_backup() +INFO: pg_stop backup() successfully executed +INFO: stop_lsn: 0/9000168 +INFO: Getting the Recovery Time from WAL +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 1s +INFO: Validating backup SCUN22 +INFO: Backup SCUN22 data files are valid +INFO: Backup SCUN22 resident size: 34MB +INFO: Backup SCUN22 completed + + + + Add or modify some parameters in the pg_probackup + configuration file, so that you do not have to specify them each time on the command line: + +backup_user@backup_host:~$ pg_probackup-16 set-config \ + -B /mnt/backups \ + --instance=node \ + --remote-host=postgres_host \ + --remote-user=postgres \ + -U backup \ + -d backupdb + + + + Check the configuration of the instance: + +backup_user@backup_host:~$ pg_probackup-16 show-config \ + -B /mnt/backups \ + --instance=node +# Backup instance information +pgdata = /var/lib/pgpro/std-16/data +system-identifier = 7364313570668255886 +xlog-seg-size = 16777216 +# Connection parameters +pgdatabase = backupdb +pghost = postgres_host +pguser = backup +# Replica parameters +replica-timeout = 5min +# Archive parameters +archive-timeout = 5min +# Logging parameters +log-level-console = INFO +log-level-file = OFF +log-format-console = PLAIN +log-format-file = PLAIN +log-filename = pg_probackup.log +log-rotation-size = 0TB +log-rotation-age = 0d +# Retention parameters +retention-redundancy = 0 +retention-window = 0 +wal-depth = 0 +# Compression parameters +compress-algorithm = none +compress-level = 1 +# Remote access parameters +remote-proto = ssh +remote-host = postgres_host +remote-user = postgres + + + Note that the parameters not modified via set-config retain their default values. + + + + Make another incremental backup in the DELTA mode, omitting + the parameters stored in the configuration file earlier: + +backup_user@backup_host:~$ pg_probackup-16 backup \ + -B /mnt/backups \ + -b DELTA \ + --instance=node \ + --stream \ + --compress-algorithm=zlib +INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN2C, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected +INFO: Database backup start +INFO: wait for pg_backup_start() +INFO: Parent backup: SCUN22 +INFO: Wait for WAL segment /mnt/backups/backups/node/SCUN2C/database/pg_wal/00000001000000000000000B to be streamed +INFO: PGDATA size: 96MB +INFO: Current Start LSN: 0/B000028, TLI: 1 +INFO: Parent Start LSN: 0/9000028, TLI: 1 +INFO: Start transferring data files +INFO: Data files are transferred, time elapsed: 0 +INFO: wait for pg_stop_backup() +INFO: pg_stop backup() successfully executed +INFO: stop_lsn: 0/B000168 +INFO: Getting the Recovery Time from WAL +INFO: Syncing backup files to disk +INFO: Backup files are synced, time elapsed: 0 +INFO: Validating backup SCUN2C +INFO: Backup SCUN2C data files are valid +INFO: Backup SCUN2C resident size: 17MB +INFO: Backup SCUN2C completed + + + + List the backups of the instance again: + +backup_user@backup_host:~$ pg_probackup-16 show \ + -B /mnt/backups \ + --instance=node +=================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +=================================================================================================================================== + node 16 SCUN2C 2024-05-02 11:18:13+03 DELTA STREAM 1/1 10s 1139kB 16MB 1.00 0/B000028 0/B000168 OK + node 16 SCUN22 2024-05-02 11:18:04+03 DELTA STREAM 1/1 10s 2357kB 32MB 1.02 0/9000028 0/9000168 OK + node 16 SCUN1Q 2024-05-02 11:17:53+03 FULL STREAM 1/0 12s 40MB 16MB 2.42 0/8000028 0/800BBD0 OK + + + + Restore the data from the latest available backup to an arbitrary location: + +backup_user@backup_host:~$ pg_probackup-16 restore \ + -B /mnt/backups \ + -D /var/lib/pgpro/std-16/staging-data \ + --instance=node +INFO: Validating parents for backup SCUN2C +INFO: Validating backup SCUN1Q +INFO: Backup SCUN1Q data files are valid +INFO: Validating backup SCUN22 +INFO: Backup SCUN22 data files are valid +INFO: Validating backup SCUN2C +INFO: Backup SCUN2C data files are valid +INFO: Backup SCUN2C WAL segments are valid +INFO: Backup SCUN2C is valid. +INFO: Restoring the database from backup SCUN2C on localhost +INFO: Start restoring backup files. PGDATA size: 112MB +INFO: Backup files are restored. Transfered bytes: 112MB, time elapsed: 0 +INFO: Restore incremental ratio (less is better): 100% (112MB/112MB) +INFO: Syncing restored files to disk +INFO: Restored backup files are synced, time elapsed: 2s +INFO: Restore of backup SCUN2C completed. + + + + + + + Codestin Search App + + Codestin Search App + + You may need to use apt-get instead of apt on older systems in the commands below. + + + + + Add the pg_probackup repository GPG key + + +sudo apt install gpg wget +wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | \ +sudo tee /etc/apt/trusted.gpg.d/pg_probackup.asc + + + + + Setup the binary package repository + + +. 
/etc/os-release +echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb $VERSION_CODENAME main-$VERSION_CODENAME" | \ +sudo tee /etc/apt/sources.list.d/pg_probackup.list + + + + + Optionally setup the source package repository for rebuilding the binaries + + +echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb $VERSION_CODENAME main-$VERSION_CODENAME" | \ +sudo tee -a /etc/apt/sources.list.d/pg_probackup.list + + + + + List the available pg_probackup packages + + + + + Using apt: + + +sudo apt update +apt search pg_probackup + + + + + Using apt-get: + + +sudo apt-get update +apt-cache search pg_probackup + + + + + + + Install or upgrade a pg_probackup version of your choice + + +sudo apt install pg-probackup-16 + + + + + Optionally install the debug package + + +sudo apt install pg-probackup-16-dbg + + + + + Optionally install the source package (provided you have set up the source package repository as described above) + + +sudo apt install dpkg-dev +sudo apt source pg-probackup-16 + + + + + + Codestin Search App + + You may need to use yum instead of dnf on older systems in the commands below. + + + + + Install the pg_probackup repository + + +dnf install https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm + + + + + List the available pg_probackup packages + + +dnf search pg_probackup + + + + + Install or upgrade a pg_probackup version of your choice + + +dnf install pg_probackup-16 + + + + + Optionally install the debug package + + +dnf install pg_probackup-16-debuginfo + + + + + Optionally install the source package for rebuilding the binaries + + + + + Using dnf: + + +dnf install 'dnf-command(download)' +dnf download --source pg_probackup-16 + + + + + Using yum: + + +yumdownloader --source pg_probackup-16 + + + + + + + + Codestin Search App + + + + Setup the repository + + + + + On ALT Linux 10: + + +. /etc/os-release +echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p$VERSION_ID x86_64 vanilla" | \ +sudo tee /etc/apt/sources.list.d/pg_probackup.list + + + + + On ALT Linux 8 and 9: + + +. 
/etc/os-release +echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-$VERSION_ID x86_64 vanilla" | \ +sudo tee /etc/apt/sources.list.d/pg_probackup.list + + + + + + + List the available pg_probackup packages + + +sudo apt-get update +apt-cache search pg_probackup + + + + + Install or upgrade a pg_probackup version of your choice + + +sudo apt-get install pg_probackup-16 + + + + + Optionally install the debug package + + +sudo apt-get install pg_probackup-16-debuginfo + + + + + + Codestin Search App + + + + Add the pg_probackup repository GPG key + + +zypper in -y gpg wget +wget -O GPG-KEY-PG_PROBACKUP https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP +rpm --import GPG-KEY-PG_PROBACKUP + + + + + Setup the repository + + +zypper in https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm + + + + + List the available pg_probackup packages + + +zypper se pg_probackup + + + + + Install or upgrade a pg_probackup version of your choice + + +zypper in pg_probackup-16 + + + + + Optionally install the source package for rebuilding the binaries + + +zypper si pg_probackup-16 + + + + + + + Codestin Search App Once you have pg_probackup installed, complete the following setup: @@ -534,10 +1098,10 @@ pg_probackup init -B backup_dir To add a new backup instance, run the following command: -pg_probackup add-instance -B backup_dir -D data_dir --instance instance_name [remote_options] +pg_probackup add-instance -B backup_dir -D data_dir --instance=instance_name [remote_options] - where: + Where: @@ -569,10 +1133,9 @@ pg_probackup add-instance -B backup_dir -D backups/instance_name directory contains the pg_probackup.conf configuration file that controls - pg_probackup settings for this backup instance. If you run this - command with the - remote_options, the specified - parameters will be added to pg_probackup.conf. + pg_probackup settings for this backup instance. To add + remote_options to the configuration file, use the + command. For details on how to fine-tune pg_probackup configuration, see @@ -606,33 +1169,22 @@ pg_probackup add-instance -B backup_dir -D backup role is used as an example. + + For security reasons, it is recommended to run the configuration SQL queries below + in a separate database. + + +postgres=# CREATE DATABASE backupdb; +postgres=# \c backupdb + To perform a , the following permissions for role backup are required only in the database used for - connection to the PostgreSQL server: + connection to the PostgreSQL server. 
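The version-specific GRANT script shown below can be saved to a file and applied in that database with psql (an illustrative invocation; the file name backup_role_grants.sql is hypothetical, and backupdb is the database created above):

psql -U postgres -d backupdb -v ON_ERROR_STOP=1 -f backup_role_grants.sql   # stop on the first error so a partially applied script is noticed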
- For PostgreSQL 9.5: - - -BEGIN; -CREATE ROLE backup WITH LOGIN; -GRANT USAGE ON SCHEMA pg_catalog TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; -COMMIT; - - - For PostgreSQL 9.6: + For PostgreSQL versions 11 — 14: BEGIN; @@ -642,10 +1194,10 @@ GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; @@ -653,7 +1205,7 @@ GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; COMMIT; - For PostgreSQL 10 or higher: + For PostgreSQL 15 or higher: BEGIN; @@ -662,8 +1214,8 @@ GRANT USAGE ON SCHEMA pg_catalog TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; @@ -721,7 +1273,18 @@ COMMIT; - Grant the REPLICATION privilege to the backup role: + If the backup role does not exist, create it with + the REPLICATION privilege when + Configuring the + Database Cluster: + + +CREATE ROLE backup WITH LOGIN REPLICATION; + + + + + If the backup role already exists, grant it with the REPLICATION privilege: ALTER ROLE backup WITH REPLICATION; @@ -810,7 +1373,7 @@ ALTER ROLE backup WITH REPLICATION; parameter, as follows: -archive_command = '"install_dir/pg_probackup" archive-push -B "backup_dir" --instance instance_name --wal-file-name=%f 
[remote_options]' +archive_command = '"install_dir/pg_probackup" archive-push -B "backup_dir" --instance=instance_name --wal-file-name=%f [remote_options]' @@ -880,7 +1443,7 @@ archive_command = '"install_dir/pg_probackup" archive Codestin Search App - For PostgreSQL 9.6 or higher, pg_probackup can take backups from + pg_probackup can take backups from a standby server. This requires the following additional setup: @@ -1003,12 +1566,12 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; Codestin Search App - pg_probackup supports the remote mode that allows to perform - backup, restore and WAL archiving operations remotely. In this - mode, the backup catalog is stored on a local system, while - PostgreSQL instance to backup and/or to restore is located on a - remote system. Currently the only supported remote protocol is - SSH. + pg_probackup supports the remote mode that + allows you to perform backup, restore and WAL archiving operations remotely. + In this mode, the backup catalog is stored on a local system, while + PostgreSQL instance to backup and/or to restore + is located on a remote system. Currently the only supported remote + protocol is SSH. Codestin Search App @@ -1016,73 +1579,84 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; If you are going to use pg_probackup in remote mode via SSH, complete the following steps: - + Install pg_probackup on both systems: backup_host and - db_host. + postgres_host. - For communication between the hosts set up the passwordless - SSH connection between backup user on - backup_host and + For communication between the hosts set up a passwordless + SSH connection between the backup_user user on + backup_host and the postgres user on - db_host: + postgres_host: -[backup@backup_host] ssh-copy-id postgres@db_host +[backup_user@backup_host] ssh-copy-id postgres@postgres_host + + Where: + + + + + backup_host is the system with + backup catalog. + + + + + postgres_host is the system with the PostgreSQL + cluster. + + + + + backup_user is the OS user on + backup_host used to run pg_probackup. + + + + + postgres is the user on + postgres_host under which + PostgreSQL cluster processes are running. + For PostgreSQL 11 or higher a + more secure approach can be used thanks to + allow-group-access feature. + + + If you are going to rely on continuous - WAL archiving, set up passwordless SSH - connection between postgres user on - db_host and backup + WAL archiving, set up a passwordless SSH + connection between the postgres user on + postgres_host and the backup user on backup_host: -[postgres@db_host] ssh-copy-id backup@backup_host +[postgres@postgres_host] ssh-copy-id backup_user@backup_host - - - where: - - - - - backup_host is the system with - backup catalog. - - - db_host is the system with PostgreSQL - cluster. + Make sure pg_probackup on postgres_host + can be located when a connection via SSH is made. For example, for Bash, you can + modify PATH in ~/.bashrc of the postgres user + (above the line in bashrc that exits the script for non-interactive shells). + Alternatively, for pg_probackup commands, specify the path to the directory + containing the pg_probackup binary on postgres_host via + the --remote-path option. - - - backup is the OS user on - backup_host used to run pg_probackup. - - - - - postgres is the OS user on - db_host used to start the PostgreSQL - cluster. For PostgreSQL 11 or higher a - more secure approach can be used thanks to - allow-group-access - feature. 
- - - + pg_probackup in the remote mode via SSH works as follows: @@ -1132,10 +1706,10 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; The main process is usually started on backup_host and connects to - db_host, but in case of + postgres_host, but in case of archive-push and archive-get commands the main process - is started on db_host and connects to + is started on postgres_host and connects to backup_host. @@ -1156,7 +1730,7 @@ GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; Compression is always done on - db_host, while decompression is always done on + postgres_host, while decompression is always done on backup_host. @@ -1248,41 +1822,16 @@ CREATE EXTENSION ptrack; To create a backup, run the following command: -pg_probackup backup -B backup_dir --instance instance_name -b backup_mode +pg_probackup backup -B backup_dir --instance=instance_name -b backup_mode Where backup_mode can take one of the following values: + FULL, + DELTA, + PAGE, and + PTRACK. - - - - FULL — creates a full backup that contains all the data - files of the cluster to be restored. - - - - - DELTA — reads all data files in the data directory and - creates an incremental backup for pages that have changed - since the previous backup. - - - - - PAGE — creates an incremental backup based on the WAL - files that have been generated since the previous full or - incremental backup was taken. Only changed blocks are read - from data files. - - - - - PTRACK — creates an incremental backup tracking page - changes on the fly. - - - When restoring a cluster from an incremental backup, pg_probackup relies on the parent full backup and all the @@ -1299,7 +1848,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL +pg_probackup backup -B backup_dir --instance=instance_name -b FULL ARCHIVE backups rely on @@ -1329,7 +1878,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --stream --temp-slot +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --stream --temp-slot The optional flag ensures that @@ -1422,7 +1971,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --external-dirs=/etc/dir1:/etc/dir2 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --external-dirs=/etc/dir1:/etc/dir2 Similarly, to include C:\dir1 and @@ -1430,7 +1979,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --external-dirs=C:\dir1;C:\dir2 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --external-dirs=C:\dir1;C:\dir2 pg_probackup recursively copies the contents @@ -1458,7 +2007,7 @@ pg_probackup backup -B backup_dir --instance -pg_probackup checkdb [-B backup_dir [--instance instance_name]] [-D data_dir] [connection_options] +pg_probackup checkdb [-B backup_dir [--instance=instance_name]] [-D data_dir] [connection_options] @@ -1556,7 +2105,7 @@ pg_probackup checkdb --amcheck --skip-block-validation [connection_ this command: -pg_probackup validate -B backup_dir --instance instance_name --recovery-target-xid=4242 +pg_probackup validate -B backup_dir --instance=instance_name --recovery-target-xid=4242 If validation completes successfully, pg_probackup displays the @@ -1576,11 +2125,11 @@ pg_probackup validate -B backup_dir --instance For example, to check that you can restore the database cluster - from a backup copy with 
the PT8XFX backup ID up to the + from a backup copy with the SCUN2C backup ID up to the specified timestamp, run this command: - -pg_probackup validate -B backup_dir --instance instance_name -i PT8XFX --recovery-target-time="2017-05-18 14:18:11+03" + +pg_probackup validate -B backup_dir --instance=instance_name -i SCUN2C --recovery-target-time="2024-05-03 11:18:13+03" If you specify the backup_id of an incremental backup, @@ -1598,10 +2147,10 @@ pg_probackup validate -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name -i backup_id +pg_probackup restore -B backup_dir --instance=instance_name -i backup_id - where: + Where: @@ -1645,7 +2194,7 @@ pg_probackup restore -B backup_dir --instance primary_conninfo parameter; you have to add the password manually or use the --primary-conninfo option, if required. - For PostgreSQL 11 or lower, + For PostgreSQL 11, recovery settings are written into the recovery.conf file. Starting from PostgreSQL 12, pg_probackup writes these settings into @@ -1682,7 +2231,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name -D data_dir -j 4 -i backup_id -T tablespace1_dir=tablespace1_newdir -T tablespace2_dir=tablespace2_newdir +pg_probackup restore -B backup_dir --instance=instance_name -D data_dir -j 4 -i backup_id -T tablespace1_dir=tablespace1_newdir -T tablespace2_dir=tablespace2_newdir @@ -1714,7 +2263,7 @@ pg_probackup restore -B backup_dir --instance command with the following options: -pg_probackup restore -B backup_dir --instance instance_name -D data_dir -I incremental_mode +pg_probackup restore -B backup_dir --instance=instance_name -D data_dir -I incremental_mode Where incremental_mode can take one of the @@ -1733,7 +2282,7 @@ pg_probackup restore -B backup_dir --instance LSN — read the pg_control in the - data directory to obtain redo LSN and redo TLI, which allows + data directory to obtain redo LSN and redo TLI, which allows you to determine a point in history(shiftpoint), where data directory state shifted from target backup chain history. If shiftpoint is not within reach of backup chain history, then restore is aborted. 
@@ -1774,29 +2323,36 @@ pg_probackup restore -B backup_dir --instance - -============================================================================================================================================= - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -============================================================================================================================================= - node 12 QBRNBP 2020-06-11 17:40:58+03 DELTA ARCHIVE 16/15 40s 194MB 16MB 8.26 15/2C000028 15/2D000128 OK - node 12 QBRIDX 2020-06-11 15:51:42+03 PAGE ARCHIVE 15/15 11s 18MB 16MB 5.10 14/DC000028 14/DD0000B8 OK - node 12 QBRIAJ 2020-06-11 15:51:08+03 PAGE ARCHIVE 15/15 20s 141MB 96MB 6.22 14/D4BABFE0 14/DA9871D0 OK - node 12 QBRHT8 2020-06-11 15:45:56+03 FULL ARCHIVE 15/0 2m:11s 1371MB 416MB 10.93 14/9D000028 14/B782E9A0 OK - -pg_probackup restore -B /backup --instance node -R -I lsn -INFO: Running incremental restore into nonempty directory: "/var/lib/pgsql/12/data" -INFO: Destination directory redo point 15/2E000028 on tli 16 is within reach of backup QBRIDX with Stop LSN 14/DD0000B8 on tli 15 -INFO: shift LSN: 14/DD0000B8 -INFO: Restoring the database from backup at 2020-06-11 17:40:58+03 -INFO: Extracting the content of destination directory for incremental restore -INFO: Destination directory content extracted, time elapsed: 1s -INFO: Removing redundant files in destination directory -INFO: Redundant files are removed, time elapsed: 1s -INFO: Start restoring backup files. PGDATA size: 15GB -INFO: Backup files are restored. Transfered bytes: 1693MB, time elapsed: 43s -INFO: Restore incremental ratio (less is better): 11% (1693MB/15GB) -INFO: Restore of backup QBRNBP completed. - + +====================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +====================================================================================================================================== + node 16 SCUN3Y 2024-05-02 11:19:16+03 DELTA STREAM 16/15 7s 92MB 208MB 2.27 0/3C0043A8 0/46159C70 OK + node 16 SCUN3M 2024-05-02 11:19:01+03 PTRACK STREAM 15/15 10s 30MB 16MB 2.23 0/32000028 0/32005ED0 OK + node 16 SCUN39 2024-05-02 11:18:50+03 PAGE STREAM 15/15 12s 46MB 32MB 1.44 0/2A000028 0/2B0000B8 OK + node 16 SCUN2V 2024-05-02 11:18:38+03 FULL STREAM 15/0 11s 154MB 16MB 2.32 0/23000028 0/23000168 OK + +backup_user@backup_host:~$ pg_probackup-16 restore -B /mnt/backups --instance=node -R -I lsn +INFO: Destination directory and tablespace directories are empty, disable incremental restore +INFO: Validating parents for backup SCUN3Y +INFO: Validating backup SCUN2V +INFO: Backup SCUN2V data files are valid +INFO: Validating backup SCUN39 +INFO: Backup SCUN39 data files are valid +INFO: Validating backup SCUN3M +INFO: Backup SCUN3M data files are valid +INFO: Validating backup SCUN3Y +INFO: Backup SCUN3Y data files are valid +INFO: Backup SCUN3Y WAL segments are valid +INFO: Backup SCUN3Y is valid. +INFO: Restoring the database from backup SCUN3Y +INFO: Start restoring backup files. PGDATA size: 759MB +INFO: Backup files are restored. Transfered bytes: 759MB, time elapsed: 3s +INFO: Restore incremental ratio (less is better): 100% (759MB/759MB) +INFO: Syncing restored files to disk +INFO: Restored backup files are synced, time elapsed: 1s +INFO: Restore of backup SCUN3Y completed. 
+ Incremental restore is possible only for backups with @@ -1820,7 +2376,7 @@ INFO: Restore of backup QBRNBP completed. with the following options: -pg_probackup restore -B backup_dir --instance instance_name --db-include=database_name +pg_probackup restore -B backup_dir --instance=instance_name --db-include=database_name The option can be specified @@ -1829,14 +2385,14 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --db-include=db1 --db-include=db2 +pg_probackup restore -B backup_dir --instance=instance_name --db-include=db1 --db-include=db2 To exclude one or more databases from restore, use the option: -pg_probackup restore -B backup_dir --instance instance_name --db-exclude=database_name +pg_probackup restore -B backup_dir --instance=instance_name --db-exclude=database_name The option can be specified @@ -1845,7 +2401,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --db-exclude=db1 --db-exclude=db2 +pg_probackup restore -B backup_dir --instance=instance_name --db-exclude=db1 --db-exclude=db2 Partial restore relies on lax behavior of PostgreSQL recovery @@ -1907,7 +2463,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-time="2017-05-18 14:18:11+03" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-time="2024-05-03 11:18:13+03" @@ -1916,7 +2472,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-xid option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-xid=687 +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-xid=687 @@ -1925,7 +2481,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-lsn option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-lsn=16/B374D848 +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-lsn=16/B374D848 @@ -1934,7 +2490,7 @@ pg_probackup restore -B backup_dir --instance --recovery-target-name option: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target-name="before_app_upgrade" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target-name="before_app_upgrade" @@ -1944,7 +2500,7 @@ pg_probackup restore -B backup_dir --instance latest value: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target="latest" +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target="latest" @@ -1954,7 +2510,7 @@ pg_probackup restore -B backup_dir --instance immediate value: -pg_probackup restore -B backup_dir --instance instance_name --recovery-target='immediate' +pg_probackup restore -B backup_dir --instance=instance_name --recovery-target='immediate' @@ -1962,7 +2518,7 @@ pg_probackup restore -B backup_dir --instance Codestin Search App - pg_probackup supports the remote mode that allows to perform + pg_probackup supports the remote mode that allows you to perform backup and restore operations remotely via SSH. In this mode, the backup catalog is stored on a local system, while PostgreSQL instance to be backed @@ -1975,6 +2531,15 @@ pg_probackup restore -B backup_dir --instance + + + In addition to SSH connection, pg_probackup uses + a regular connection to the database to manage the remote operation. + See the section Configuring + the Database Cluster for details of how to set up + a database connection. 
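As a quick sanity check before running remote commands, both channels can be verified from backup_host (a sketch using the host, role, and database names from the examples above; adjust them to your environment):

ssh postgres@postgres_host 'true' && echo "SSH OK"                            # channel used by the pg_probackup remote agents
psql -h postgres_host -U backup -d backupdb -c 'SELECT pg_is_in_recovery();'  # regular database connection used to manage the operation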
+ + The typical workflow is as follows: @@ -1983,8 +2548,7 @@ pg_probackup restore -B backup_dir --instance On your backup host, configure pg_probackup as explained in the section - Installation and - Setup. For the + Setup. For the and commands, make sure to specify remote @@ -2030,7 +2594,7 @@ pg_probackup restore -B backup_dir --instance 2302, run: -pg_probackup backup -B backup_dir --instance instance_name -b FULL --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 To restore the latest available backup on a remote system with host address @@ -2038,7 +2602,7 @@ pg_probackup backup -B backup_dir --instance 2302, run: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 Restoring an ARCHIVE backup or performing PITR in the remote mode @@ -2065,20 +2629,20 @@ pg_probackup restore -B backup_dir --instance 2303, run: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --archive-host=192.168.0.3 --archive-port=2303 --archive-user=backup +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --archive-host=192.168.0.3 --archive-port=2303 --archive-user=backup Provided arguments will be used to construct the restore_command: -restore_command = '"install_dir/pg_probackup" archive-get -B "backup_dir" --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' +restore_command = '"install_dir/pg_probackup" archive-get -B "backup_dir" --instance=instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' Alternatively, you can use the option to provide the entire restore_command: -pg_probackup restore -B backup_dir --instance instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --restore-command='"install_dir/pg_probackup" archive-get -B "backup_dir" --instance instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' +pg_probackup restore -B backup_dir --instance=instance_name --remote-user=postgres --remote-host=192.168.0.2 --remote-port=2302 --restore-command='"install_dir/pg_probackup" archive-get -B "backup_dir" --instance=instance_name --wal-file-path=%p --wal-file-name=%f --remote-host=192.168.0.3 --remote-port=2303 --remote-user=backup' @@ -2107,7 +2671,7 @@ pg_probackup restore -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL -j 4 +pg_probackup backup -B backup_dir --instance=instance_name -b FULL -j 4 @@ -2168,14 +2732,14 @@ pg_probackup backup -B backup_dir --instance set-config command: -pg_probackup set-config -B backup_dir --instance instance_name +pg_probackup set-config -B backup_dir --instance=instance_name [--external-dirs=external_directory_path] [remote_options] [connection_options] [retention_options] [logging_options] To view the current settings, run the following command: -pg_probackup show-config -B backup_dir --instance instance_name +pg_probackup show-config -B backup_dir --instance=instance_name You can override 
the settings defined in pg_probackup.conf when @@ -2249,16 +2813,16 @@ pg_probackup show -B backup_dir pg_probackup displays the list of all the available backups. For example: - + BACKUP INSTANCE 'node' ====================================================================================================================================== - Instance Version ID Recovery time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status ====================================================================================================================================== - node 10 PYSUE8 2019-10-03 15:51:48+03 FULL ARCHIVE 1/0 16s 9047kB 16MB 4.31 0/12000028 0/12000160 OK - node 10 P7XDQV 2018-04-29 05:32:59+03 DELTA STREAM 1/1 11s 19MB 16MB 1.00 0/15000060 0/15000198 OK - node 10 P7XDJA 2018-04-29 05:28:36+03 PTRACK STREAM 1/1 21s 32MB 32MB 1.00 0/13000028 0/13000198 OK - node 10 P7XDHU 2018-04-29 05:27:59+03 PAGE STREAM 1/1 15s 33MB 16MB 1.00 0/11000028 0/110001D0 OK - node 10 P7XDHB 2018-04-29 05:27:15+03 FULL STREAM 1/0 11s 39MB 16MB 1.00 0/F000028 0/F000198 OK + node 16 SCUN4E 2024-05-02 11:19:37+03 FULL ARCHIVE 1/0 13s 239MB 16MB 2.31 0/4C000028 0/4D0000B8 OK + node 16 SCUN3Y 2024-05-02 11:19:16+03 DELTA STREAM 1/1 7s 92MB 208MB 2.27 0/3C0043A8 0/46159C70 OK + node 16 SCUN3M 2024-05-02 11:19:01+03 PTRACK STREAM 1/1 10s 30MB 16MB 2.23 0/32000028 0/32005ED0 OK + node 16 SCUN39 2024-05-02 11:18:50+03 PAGE STREAM 1/1 12s 46MB 32MB 1.44 0/2A000028 0/2B0000B8 OK + node 16 SCUN2V 2024-05-02 11:18:38+03 FULL STREAM 1/0 11s 154MB 16MB 2.32 0/23000028 0/23000168 OK For each backup, the following information is provided: @@ -2408,12 +2972,12 @@ BACKUP INSTANCE 'node' show command with the backup ID: -pg_probackup show -B backup_dir --instance instance_name -i backup_id +pg_probackup show -B backup_dir --instance=instance_name -i backup_id The sample output is as follows: - + #Configuration backup-mode = FULL stream = false @@ -2423,27 +2987,26 @@ from-replica = false #Compatibility block-size = 8192 -wal-block-size = 8192 +xlog-block-size = 8192 checksum-version = 1 -program-version = 2.1.3 -server-version = 10 +program-version = 2.5.15 +server-version = 16 #Result backup info timelineid = 1 -start-lsn = 0/04000028 -stop-lsn = 0/040000f8 -start-time = '2017-05-16 12:57:29' -end-time = '2017-05-16 12:57:31' -recovery-xid = 597 -recovery-time = '2017-05-16 12:57:31' -expire-time = '2020-05-16 12:57:31' -data-bytes = 22288792 +start-lsn = 0/4C000028 +stop-lsn = 0/4D0000B8 +start-time = '2024-05-02 11:19:26+03' +end-time = '2024-05-02 11:19:39+03' +recovery-xid = 743 +recovery-time = '2024-05-02 11:19:37+03' +data-bytes = 250827955 wal-bytes = 16777216 -uncompressed-bytes = 39961833 -pgdata-bytes = 39859393 +uncompressed-bytes = 578216425 +pgdata-bytes = 578216107 status = OK -parent-backup-id = 'PT8XFX' -primary_conninfo = 'user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any' +primary_conninfo = 'user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable' +content-crc = 802820606 Detailed output has additional attributes: @@ -2552,44 +3115,46 @@ primary_conninfo = 'user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmod in the JSON format: 
-pg_probackup show -B backup_dir --instance instance_name --format=json -i backup_id +pg_probackup show -B backup_dir --instance=instance_name --format=json -i backup_id The sample output is as follows: - + [ - { - "instance": "node", - "backups": [ - { - "id": "PT91HZ", - "parent-backup-id": "PT8XFX", - "backup-mode": "DELTA", - "wal": "ARCHIVE", - "compress-alg": "zlib", - "compress-level": 1, - "from-replica": false, - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.3", - "server-version": "10", - "current-tli": 16, - "parent-tli": 2, - "start-lsn": "0/8000028", - "stop-lsn": "0/8000160", - "start-time": "2019-06-17 18:25:11+03", - "end-time": "2019-06-17 18:25:16+03", - "recovery-xid": 0, - "recovery-time": "2019-06-17 18:25:15+03", - "data-bytes": 106733, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } + { + "instance": "node", + "backups": [ + { + "id": "SCUN4E", + "backup-mode": "FULL", + "wal": "ARCHIVE", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 16, + "parent-tli": 2, + "start-lsn": "0/4C000028", + "stop-lsn": "0/4D0000B8", + "start-time": "2024-05-02 11:19:26+03", + "end-time": "2024-05-02 11:19:39+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:37+03", + "data-bytes": 250827955, + "wal-bytes": 16777216, + "uncompressed-bytes": 578216425, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 802820606 + } + ] + } ] @@ -2600,22 +3165,19 @@ pg_probackup show -B backup_dir --instance -pg_probackup show -B backup_dir [--instance instance_name] --archive +pg_probackup show -B backup_dir [--instance=instance_name] --archive pg_probackup displays the list of all the available WAL files grouped by timelines. 
For example: - + + ARCHIVE INSTANCE 'node' -=================================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -=================================================================================================================================== - 5 1 0/B000000 00000005000000000000000B 00000005000000000000000C 2 685kB 48.00 0 OK - 4 3 0/18000000 000000040000000000000018 00000004000000000000001A 3 648kB 77.00 0 OK - 3 2 0/15000000 000000030000000000000015 000000030000000000000017 3 648kB 77.00 0 OK - 2 1 0/B000108 00000002000000000000000B 000000020000000000000015 5 892kB 94.00 1 DEGRADED - 1 0 0/0 000000010000000000000001 00000001000000000000000A 10 8774kB 19.00 1 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000019 00000001000000000000004D 53 848MB 1.00 5 OK For each timeline, the following information is provided: @@ -2699,219 +3261,176 @@ ARCHIVE INSTANCE 'node' format, run the command: -pg_probackup show -B backup_dir [--instance instance_name] --archive --format=json +pg_probackup show -B backup_dir [--instance=instance_name] --archive --format=json The sample output is as follows: - + [ - { - "instance": "replica", - "timelines": [ - { - "tli": 5, - "parent-tli": 1, - "switchpoint": "0/B000000", - "min-segno": "00000005000000000000000B", - "max-segno": "00000005000000000000000C", - "n-segments": 2, - "size": 685320, - "zratio": 48.00, - "closest-backup-id": "PXS92O", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 4, - "parent-tli": 3, - "switchpoint": "0/18000000", - "min-segno": "000000040000000000000018", - "max-segno": "00000004000000000000001A", - "n-segments": 3, - "size": 648625, - "zratio": 77.00, - "closest-backup-id": "PXS9CE", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 3, - "parent-tli": 2, - "switchpoint": "0/15000000", - "min-segno": "000000030000000000000015", - "max-segno": "000000030000000000000017", - "n-segments": 3, - "size": 648911, - "zratio": 77.00, - "closest-backup-id": "PXS9CE", - "status": "OK", - "lost-segments": [], - "backups": [] - }, - { - "tli": 2, - "parent-tli": 1, - "switchpoint": "0/B000108", - "min-segno": "00000002000000000000000B", - "max-segno": "000000020000000000000015", - "n-segments": 5, - "size": 892173, - "zratio": 94.00, - "closest-backup-id": "PXS92O", - "status": "DEGRADED", - "lost-segments": [ - { - "begin-segno": "00000002000000000000000D", - "end-segno": "00000002000000000000000E" - }, - { - "begin-segno": "000000020000000000000010", - "end-segno": "000000020000000000000012" - } - ], - "backups": [ - { - "id": "PXS9CE", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 2, - "parent-tli": 0, - "start-lsn": "0/C000028", - "stop-lsn": "0/C000160", - "start-time": "2019-09-13 21:43:26+03", - "end-time": "2019-09-13 21:43:30+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 
21:43:29+03", - "data-bytes": 104674852, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - }, - { - "tli": 1, - "parent-tli": 0, - "switchpoint": "0/0", - "min-segno": "000000010000000000000001", - "max-segno": "00000001000000000000000A", - "n-segments": 10, - "size": 8774805, - "zratio": 19.00, - "closest-backup-id": "", - "status": "OK", - "lost-segments": [], - "backups": [ - { - "id": "PXS92O", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "true", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 0, - "start-lsn": "0/4000028", - "stop-lsn": "0/6000028", - "start-time": "2019-09-13 21:37:36+03", - "end-time": "2019-09-13 21:38:45+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:30+03", - "data-bytes": 25987319, - "wal-bytes": 50331648, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } - ] - }, - { - "instance": "master", - "timelines": [ - { - "tli": 1, - "parent-tli": 0, - "switchpoint": "0/0", - "min-segno": "000000010000000000000001", - "max-segno": "00000001000000000000000B", - "n-segments": 11, - "size": 8860892, - "zratio": 20.00, - "status": "OK", - "lost-segments": [], - "backups": [ - { - "id": "PXS92H", - "parent-backup-id": "PXS92C", - "backup-mode": "PAGE", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 1, - "start-lsn": "0/4000028", - "stop-lsn": "0/50000B8", - "start-time": "2019-09-13 21:37:29+03", - "end-time": "2019-09-13 21:37:31+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:30+03", - "data-bytes": 1328461, - "wal-bytes": 33554432, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - }, - { - "id": "PXS92C", - "backup-mode": "FULL", - "wal": "ARCHIVE", - "compress-alg": "none", - "compress-level": 1, - "from-replica": "false", - "block-size": 8192, - "xlog-block-size": 8192, - "checksum-version": 1, - "program-version": "2.1.5", - "server-version": "10", - "current-tli": 1, - "parent-tli": 0, - "start-lsn": "0/2000028", - "stop-lsn": "0/2000160", - "start-time": "2019-09-13 21:37:24+03", - "end-time": "2019-09-13 21:37:29+03", - "recovery-xid": 0, - "recovery-time": "2019-09-13 21:37:28+03", - "data-bytes": 24871902, - "wal-bytes": 16777216, - "primary_conninfo": "user=backup passfile=/var/lib/pgsql/.pgpass port=5432 sslmode=disable sslcompression=1 target_session_attrs=any", - "status": "OK" - } - ] - } - ] - } + { + "instance": "node", + "timelines": [ + { + "tli": 1, + "parent-tli": 0, + "switchpoint": "0/0", + "min-segno": "000000010000000000000019", + "max-segno": "00000001000000000000004D", + "n-segments": 53, + "size": 889192448, + "zratio": 1.00, + "closest-backup-id": "", + "status": "OK", + "lost-segments": [], + "backups": [ + { + "id": "SCUN4E", + "backup-mode": "FULL", + "wal": "ARCHIVE", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, 
+ "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 0, + "start-lsn": "0/4C000028", + "stop-lsn": "0/4D0000B8", + "start-time": "2024-05-02 11:19:26+03", + "end-time": "2024-05-02 11:19:39+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:37+03", + "data-bytes": 250827955, + "wal-bytes": 16777216, + "uncompressed-bytes": 578216425, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 802820606 + }, + { + "id": "SCUN3Y", + "parent-backup-id": "SCUN3M", + "backup-mode": "DELTA", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/3C0043A8", + "stop-lsn": "0/46159C70", + "start-time": "2024-05-02 11:19:10+03", + "end-time": "2024-05-02 11:19:17+03", + "recovery-xid": 743, + "recovery-time": "2024-05-02 11:19:16+03", + "data-bytes": 96029293, + "wal-bytes": 218103808, + "uncompressed-bytes": 217639806, + "pgdata-bytes": 578216107, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3074300814 + }, + { + "id": "SCUN3M", + "parent-backup-id": "SCUN39", + "backup-mode": "PTRACK", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/32000028", + "stop-lsn": "0/32005ED0", + "start-time": "2024-05-02 11:18:58+03", + "end-time": "2024-05-02 11:19:08+03", + "recovery-xid": 742, + "recovery-time": "2024-05-02 11:19:01+03", + "data-bytes": 31205704, + "wal-bytes": 16777216, + "uncompressed-bytes": 69585790, + "pgdata-bytes": 509927595, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3446949708 + }, + { + "id": "SCUN39", + "parent-backup-id": "SCUN2V", + "backup-mode": "PAGE", + "wal": "STREAM", + "compress-alg": "pglz", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 1, + "start-lsn": "0/2A000028", + "stop-lsn": "0/2B0000B8", + "start-time": "2024-05-02 11:18:45+03", + "end-time": "2024-05-02 11:18:57+03", + "recovery-xid": 741, + "recovery-time": "2024-05-02 11:18:50+03", + "data-bytes": 48381612, + "wal-bytes": 33554432, + "uncompressed-bytes": 69569406, + "pgdata-bytes": 441639083, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 
sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 3492989773 + }, + { + "id": "SCUN2V", + "backup-mode": "FULL", + "wal": "STREAM", + "compress-alg": "zlib", + "compress-level": 1, + "from-replica": "false", + "block-size": 8192, + "xlog-block-size": 8192, + "checksum-version": 1, + "program-version": "2.5.15", + "server-version": "16", + "current-tli": 1, + "parent-tli": 0, + "start-lsn": "0/23000028", + "stop-lsn": "0/23000168", + "start-time": "2024-05-02 11:18:31+03", + "end-time": "2024-05-02 11:18:42+03", + "recovery-xid": 740, + "recovery-time": "2024-05-02 11:18:38+03", + "data-bytes": 161084290, + "wal-bytes": 16777216, + "uncompressed-bytes": 373359081, + "pgdata-bytes": 373358763, + "primary_conninfo": "user=backup channel_binding=prefer host=localhost port=5432 sslmode=prefer sslcompression=0 sslcertmode=allow sslsni=1 ssl_min_protocol_version=TLSv1.2 gssencmode=prefer krbsrvname=postgres gssdelegation=0 target_session_attrs=any load_balance_hosts=disable", + "status": "OK", + "content-crc": 1621343133 + } + ] + } + ] + } ] @@ -3001,7 +3520,7 @@ pg_probackup show -B backup_dir [--instance -pg_probackup set-config -B backup_dir --instance instance_name --retention-redundancy=2 --retention-window=7 +pg_probackup set-config -B backup_dir --instance=instance_name --retention-redundancy=2 --retention-window=7 @@ -3019,7 +3538,7 @@ pg_probackup set-config -B backup_dir --instance --delete-expired flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired If you would like to also remove the WAL files that are no @@ -3027,7 +3546,7 @@ pg_probackup delete -B backup_dir --instance --delete-wal flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --delete-wal +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --delete-wal @@ -3038,7 +3557,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --retention-window=7 --retention-redundancy=2 +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --retention-window=7 --retention-redundancy=2 Since incremental backups require that their parent full @@ -3058,48 +3577,48 @@ pg_probackup delete -B backup_dir --instance backup_dir directory, with the option set to 7, and you have the following backups - available on April 10, 2019: + available on May 02, 2024: - + BACKUP INSTANCE 'node' -=================================================================================================================================== - Instance Version ID Recovery time Mode WAL TLI Time Data WAL Zratio Start LSN Stop LSN Status -=================================================================================================================================== - node 10 P7XDHR 2019-04-10 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/18000059 0/18000197 OK - node 10 P7XDQV 2019-04-08 05:32:59+03 PAGE STREAM 1/0 11s 19MB 16MB 1.0 0/15000060 0/15000198 OK - node 10 P7XDJA 2019-04-03 05:28:36+03 DELTA STREAM 1/0 21s 32MB 16MB 1.0 0/13000028 0/13000198 OK - -------------------------------------------------------retention window-------------------------------------------------------- - node 10 P7XDHU 2019-04-02 05:27:59+03 PAGE STREAM 1/0 
31s 33MB 16MB 1.0 0/11000028 0/110001D0 OK - node 10 P7XDHB 2019-04-01 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/F000028 0/F000198 OK - node 10 P7XDFT 2019-03-29 05:26:25+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/D000028 0/D000198 OK +===================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +===================================================================================================================================== + node 16 SCUN6L 2024-05-02 11:20:48+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/46000028 0/470000B8 OK + node 16 SCQXUI 2024-04-30 11:20:45+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/44000028 0/450000F0 OK + node 16 SCFTUG 2024-04-24 11:20:43+03 DELTA ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/42000028 0/430000B8 OK +----------------------------------------------------------retention window----------------------------------------------------------- + node 16 SCDZ6D 2024-04-23 11:20:40+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/40000028 0/410000B8 OK + node 16 SCC4HX 2024-04-22 11:20:24+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/3E000028 0/3F0000F0 OK + node 16 SC8F5G 2024-04-20 11:20:07+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/3C0000D8 0/3D00BB58 OK - Even though P7XDHB and P7XDHU backups are outside the + Even though SCC4HX and SCDZ6D backups are outside the retention window, they cannot be removed as it invalidates the - succeeding incremental backups P7XDJA and P7XDQV that are + succeeding incremental backups SCFTUG and SCQXUI that are still required, so, if you run the command with the - flag, only the P7XDFT full + flag, only the SC8F5G full backup will be removed. 
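      To make the purge concrete, a hedged sketch of the command that would perform it (backup_dir and the instance name are placeholders; the retention window of 7 days is the one assumed by this example):

# Purge backups that fall outside the configured 7-day retention window;
# per the explanation above, only the SC8F5G full backup is removed.
pg_probackup delete -B backup_dir --instance=node --delete-expired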
- With the option, the P7XDJA - backup is merged with the underlying P7XDHU and P7XDHB backups + With the option, the SCFTUG + backup is merged with the underlying SCDZ6D and SCC4HX backups and becomes a full one, so there is no need to keep these expired backups anymore: -pg_probackup delete -B backup_dir --instance node --delete-expired --merge-expired +pg_probackup delete -B backup_dir --instance=node --delete-expired --merge-expired pg_probackup show -B backup_dir - + BACKUP INSTANCE 'node' -================================================================================================================================== - Instance Version ID Recovery time Mode WAL TLI Time Data WAL Zratio Start LSN Stop LSN Status -================================================================================================================================== - node 10 P7XDHR 2019-04-10 05:27:15+03 FULL STREAM 1/0 11s 200MB 16MB 1.0 0/18000059 0/18000197 OK - node 10 P7XDQV 2019-04-08 05:32:59+03 PAGE STREAM 1/0 11s 19MB 16MB 1.0 0/15000060 0/15000198 OK - node 10 P7XDJA 2019-04-03 05:28:36+03 FULL STREAM 1/0 21s 32MB 16MB 1.0 0/13000028 0/13000198 OK +===================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +===================================================================================================================================== + node 16 SCUN6L 2024-05-02 11:20:48+03 FULL ARCHIVE 1/0 5s 296MB 16MB 2.30 0/46000028 0/470000B8 OK + node 16 SCQXUI 2024-04-30 11:20:45+03 PAGE ARCHIVE 1/1 5s 6280kB 16MB 1.00 0/44000028 0/450000F0 OK + node 16 SCFTUG 2024-04-24 11:20:43+03 FULL ARCHIVE 1/1 5s 296MB 16MB 1.00 0/42000028 0/430000B8 OK The Time field for the merged backup displays the time @@ -3115,7 +3634,7 @@ BACKUP INSTANCE 'node' for arbitrary time. For example: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --ttl=30d +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --ttl=30d This command sets the expiration time of the @@ -3127,7 +3646,7 @@ pg_probackup set-backup -B backup_dir --instance --expire-time option. For example: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --expire-time="2020-01-01 00:00:00+03" +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --expire-time="2027-05-02 11:21:00+00" Alternatively, you can use the and @@ -3136,23 +3655,23 @@ pg_probackup set-backup -B backup_dir --instance -pg_probackup backup -B backup_dir --instance instance_name -b FULL --ttl=30d -pg_probackup backup -B backup_dir --instance instance_name -b FULL --expire-time="2020-01-01 00:00:00+03" +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --ttl=30d +pg_probackup backup -B backup_dir --instance=instance_name -b FULL --expire-time="2027-05-02 11:21:00+00" To check if the backup is pinned, run the command: -pg_probackup show -B backup_dir --instance instance_name -i backup_id +pg_probackup show -B backup_dir --instance=instance_name -i backup_id If the backup is pinned, it has the expire-time attribute that displays its expiration time: - + ... -recovery-time = '2017-05-16 12:57:31' -expire-time = '2020-01-01 00:00:00+03' +recovery-time = '2024-05-02 11:21:00+00' +expire-time = '2027-05-02 11:21:00+00' data-bytes = 22288792 ... 
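      Putting the pinning commands together, a brief sketch (the backup ID SCUN4E is a placeholder taken from the sample listing earlier in this section):

# Pin an existing backup for 30 days, then confirm that the expire-time
# attribute now appears in its detailed information.
pg_probackup set-backup -B backup_dir --instance=instance_name -i SCUN4E --ttl=30d
pg_probackup show -B backup_dir --instance=instance_name -i SCUN4E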
@@ -3161,7 +3680,7 @@ data-bytes = 22288792 You can unpin the backup by setting the option to zero: -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id --ttl=0 +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id --ttl=0 @@ -3225,19 +3744,18 @@ pg_probackup set-backup -B backup_dir --instance : -pg_probackup show -B backup_dir --instance node +pg_probackup show -B backup_dir --instance=node - -BACKUP INSTANCE 'node' -==================================================================================================================================== - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -==================================================================================================================================== - node 11 PZ9442 2019-10-12 10:43:21+03 DELTA STREAM 1/0 10s 121kB 16MB 1.00 0/46000028 0/46000160 OK - node 11 PZ943L 2019-10-12 10:43:04+03 FULL STREAM 1/0 10s 180MB 32MB 1.00 0/44000028 0/44000160 OK - node 11 PZ7YR5 2019-10-11 19:49:56+03 DELTA STREAM 1/1 10s 112kB 32MB 1.00 0/41000028 0/41000160 OK - node 11 PZ7YMP 2019-10-11 19:47:16+03 DELTA STREAM 1/1 10s 376kB 32MB 1.00 0/3E000028 0/3F0000B8 OK - node 11 PZ7YK2 2019-10-11 19:45:45+03 FULL STREAM 1/0 11s 180MB 16MB 1.00 0/3C000028 0/3C000198 OK - node 11 PZ7YFO 2019-10-11 19:43:04+03 FULL STREAM 1/0 10s 30MB 16MB 1.00 0/2000028 0/200ADD8 OK + +====================================================================================================================================== + Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status +====================================================================================================================================== + node 16 SCUN92 2024-05-02 11:22:16+03 DELTA STREAM 1/1 9s 1162kB 32MB 1.08 0/7C000028 0/7C000168 OK + node 16 SCUN8N 2024-05-02 11:22:09+03 FULL STREAM 1/0 12s 296MB 16MB 2.30 0/7A000028 0/7A009A08 OK + node 16 SCUN8I 2024-05-02 11:21:55+03 DELTA STREAM 1/1 5s 1148kB 32MB 1.01 0/78000028 0/78000168 OK + node 16 SCUN86 2024-05-02 11:21:47+03 DELTA STREAM 1/1 11s 120MB 16MB 2.27 0/76000028 0/760001A0 OK + node 16 SCUN7I 2024-05-02 11:21:29+03 FULL STREAM 1/0 22s 296MB 288MB 2.30 0/63012FE8 0/74E7ADA0 OK + node 16 SCUN71 2024-05-02 11:21:12+03 FULL STREAM 1/0 13s 296MB 272MB 2.30 0/49000028 0/573683B8 OK You can check the state of the WAL archive by running the @@ -3245,28 +3763,30 @@ BACKUP INSTANCE 'node' flag: -pg_probackup show -B backup_dir --instance node --archive +pg_probackup show -B backup_dir --instance=node --archive - + + ARCHIVE INSTANCE 'node' -=============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -=============================================================================================================================== - 1 0 0/0 000000010000000000000001 000000010000000000000047 71 36MB 31.00 6 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000048 00000001000000000000007C 53 848MB 1.00 6 OK WAL purge 
without cannot achieve much, only one segment is removed: -pg_probackup delete -B backup_dir --instance node --delete-wal +pg_probackup delete -B backup_dir --instance=node --delete-wal - + + ARCHIVE INSTANCE 'node' -=============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -=============================================================================================================================== - 1 0 0/0 000000010000000000000002 000000010000000000000047 70 34MB 32.00 6 OK +================================================================================================================================ + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +================================================================================================================================ + 1 0 0/0 000000010000000000000049 00000001000000000000007C 52 832MB 1.00 6 OK If you would like, for example, to keep only those WAL @@ -3274,28 +3794,30 @@ ARCHIVE INSTANCE 'node' option to 1: -pg_probackup delete -B backup_dir --instance node --delete-wal --wal-depth=1 +pg_probackup delete -B backup_dir --instance=node --delete-wal --wal-depth=1 - + + ARCHIVE INSTANCE 'node' -================================================================================================================================ - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status -================================================================================================================================ - 1 0 0/0 000000010000000000000046 000000010000000000000047 2 143kB 228.00 6 OK +=============================================================================================================================== + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status +=============================================================================================================================== + 1 0 0/0 00000001000000000000007C 00000001000000000000007C 1 16MB 1.00 6 OK Alternatively, you can use the option with the command: -pg_probackup backup -B backup_dir --instance node -b DELTA --wal-depth=1 --delete-wal +pg_probackup backup -B backup_dir --instance=node -b DELTA --wal-depth=1 --delete-wal - + + ARCHIVE INSTANCE 'node' =============================================================================================================================== - TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status + TLI Parent TLI Switchpoint Min Segno Max Segno N segments Size Zratio N backups Status =============================================================================================================================== - 1 0 0/0 000000010000000000000048 000000010000000000000049 1 72kB 228.00 7 OK + 1 0 0/0 00000001000000000000007E 00000001000000000000007E 1 16MB 1.00 7 OK @@ -3309,7 +3831,7 @@ ARCHIVE INSTANCE 'node' recent incremental backup you would like to merge: -pg_probackup merge -B backup_dir --instance instance_name -i backup_id +pg_probackup merge -B backup_dir --instance=instance_name -i backup_id This command merges backups that belong to a common incremental backup @@ -3319,7 +3841,7 @@ pg_probackup merge -B backup_dir --instance pg_probackup in the remote mode. 
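      For example, a hedged sketch of merging one of the sample incremental backups shown earlier (the backup ID SCUN3Y and the thread count are placeholders):

# Merge the DELTA backup SCUN3Y with its parent chain, producing a single
# FULL backup, using four parallel threads.
pg_probackup merge -B backup_dir --instance=node -i SCUN3Y -j 4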
@@ -3331,7 +3853,7 @@ pg_probackup merge -B backup_dir --instance -pg_probackup show -B backup_dir --instance instance_name -i backup_id +pg_probackup show -B backup_dir --instance=instance_name -i backup_id If the merge is still in progress, the backup status is @@ -3349,7 +3871,7 @@ pg_probackup show -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name -i backup_id +pg_probackup delete -B backup_dir --instance=instance_name -i backup_id This command will delete the backup with the specified @@ -3365,7 +3887,7 @@ pg_probackup delete -B backup_dir --instance --delete-wal flag: -pg_probackup delete -B backup_dir --instance instance_name --delete-wal +pg_probackup delete -B backup_dir --instance=instance_name --delete-wal To delete backups that are expired according to the current @@ -3373,7 +3895,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name --delete-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired Expired backups cannot be removed while at least one @@ -3384,7 +3906,7 @@ pg_probackup delete -B backup_dir --instance -pg_probackup delete -B backup_dir --instance instance_name --delete-expired --merge-expired +pg_probackup delete -B backup_dir --instance=instance_name --delete-expired --merge-expired In this case, pg_probackup searches for the oldest incremental @@ -3404,7 +3926,7 @@ pg_probackup delete -B backup_dir --instance --status: -pg_probackup delete -B backup_dir --instance instance_name --status=ERROR +pg_probackup delete -B backup_dir --instance=instance_name --status=ERROR @@ -3458,10 +3980,7 @@ pg_probackup delete -B backup_dir --instance DDL commands - CREATE TABLESPACE/DROP TABLESPACE + CREATE TABLESPACE/DROP TABLESPACE cannot be run simultaneously with catchup. @@ -3479,7 +3998,7 @@ pg_probackup delete -B backup_dir --instance To prepare for cloning/synchronizing a PostgreSQL instance, - set up the source instance server as follows: + set up the source server as follows: @@ -3502,7 +4021,7 @@ pg_probackup delete -B backup_dir --instance Before cloning/synchronizing a PostgreSQL instance, ensure that the source - instance server is running and accepting connections. To clone/sync a PostgreSQL instance, + server is running and accepting connections. To clone/sync a PostgreSQL instance, on the server with the destination instance, you can run the command as follows: @@ -3511,25 +4030,25 @@ pg_probackup catchup -b catchup_mode --source-pgdata= Where catchup_mode can take one of the - following values: FULL, DELTA, or PTRACK. + following values: - - FULL — creates a full copy of the PostgreSQL instance. + + FULL — creates a full copy of the PostgreSQL instance. The data directory of the destination instance must be empty for this mode. - - DELTA — reads all data files in the data directory and + + DELTA — reads all data files in the data directory and creates an incremental copy for pages that have changed since the destination instance was shut down. - - PTRACK — tracking page changes on the fly, + + PTRACK — tracking page changes on the fly, only reads and copies pages that have changed since the point of divergence of the source and destination instances. @@ -3545,7 +4064,7 @@ pg_probackup catchup -b catchup_mode --source-pgdata= By specifying the option, you can set STREAM WAL delivery mode of copying, which will include all the necessary WAL files by streaming them from - the instance server via replication protocol. 
+ the server via replication protocol. You can use connection_options to specify @@ -3563,6 +4082,14 @@ pg_probackup catchup -b catchup_mode --source-pgdata= of threads with the option: pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --threads=num_threads + + + + Before cloning/synchronising a PostgreSQL instance, you can run the + catchup command with the flag + to estimate the size of data files to be transferred, but make no changes on disk: + +pg_probackup catchup -b catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir --stream --dry-run @@ -3576,7 +4103,7 @@ pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replic Another example shows how you can add a new remote standby server with the PostgreSQL data directory /replica-pgdata by running the catchup command in the FULL mode on four parallel threads: - + pg_probackup catchup --source-pgdata=/master-pgdata --destination-pgdata=/replica-pgdata -p 5432 -d postgres -U remote-postgres-user --stream --backup-mode=FULL --remote-host=remote-hostname --remote-user=remote-unix-username -j 4 @@ -3634,7 +4161,7 @@ pg_probackup init -B backup_dir [--help] Codestin Search App -pg_probackup add-instance -B backup_dir -D data_dir --instance instance_name [--help] +pg_probackup add-instance -B backup_dir -D data_dir --instance=instance_name [--help] Initializes a new backup instance inside the backup catalog @@ -3652,7 +4179,7 @@ pg_probackup add-instance -B backup_dir -D Codestin Search App -pg_probackup del-instance -B backup_dir --instance instance_name [--help] +pg_probackup del-instance -B backup_dir --instance=instance_name [--help] Deletes all backups and WAL files associated with the @@ -3662,7 +4189,7 @@ pg_probackup del-instance -B backup_dir --instance Codestin Search App -pg_probackup set-config -B backup_dir --instance instance_name +pg_probackup set-config -B backup_dir --instance=instance_name [--help] [--pgdata=pgdata-path] [--retention-redundancy=redundancy][--retention-window=window][--wal-depth=wal_depth] [--compress-algorithm=compression_algorithm] [--compress-level=compression_level] @@ -3688,7 +4215,7 @@ pg_probackup set-config -B backup_dir --instance Codestin Search App -pg_probackup set-backup -B backup_dir --instance instance_name -i backup_id +pg_probackup set-backup -B backup_dir --instance=instance_name -i backup_id {--ttl=ttl | --expire-time=time} [--note=backup_note] [--help] @@ -3719,7 +4246,8 @@ pg_probackup set-backup -B backup_dir --instance Codestin Search App -pg_probackup show-config -B backup_dir --instance instance_name [--format=plain|json] +pg_probackup show-config -B backup_dir --instance=instance_name [--format=plain|json] +[--no-scale-units] [logging_options] Displays the contents of the pg_probackup.conf configuration @@ -3730,6 +4258,18 @@ pg_probackup show-config -B backup_dir --instance JSON format. By default, configuration settings are shown as plain text. + + You can also specify the + option to display time and memory configuration settings in their base (unscaled) units. + Otherwise, the values are scaled to larger units for optimal display. + For example, if archive-timeout is 300, then + 5min is displayed, but if archive-timeout + is 301, then 301s is displayed. + Also, if the option is specified, configuration + settings are displayed without units and for the JSON format, + numeric and boolean values are not enclosed in quotes. 
This facilitates parsing + the output. + To edit pg_probackup.conf, use the command. @@ -3739,7 +4279,7 @@ pg_probackup show-config -B backup_dir --instance Codestin Search App pg_probackup show -B backup_dir -[--help] [--instance instance_name [-i backup_id | --archive]] [--format=plain|json] [--no-color] +[--help] [--instance=instance_name [-i backup_id | --archive]] [--format=plain|json] [--no-color] Shows the contents of the backup catalog. If @@ -3768,7 +4308,7 @@ pg_probackup show -B backup_dir Codestin Search App -pg_probackup backup -B backup_dir -b backup_mode --instance instance_name +pg_probackup backup -B backup_dir -b backup_mode --instance=instance_name [--help] [-j num_threads] [--progress] [-C] [--stream [-S slot_name] [--temp-slot]] [--backup-pg-log] [--no-validate] [--skip-block-validation] @@ -3788,35 +4328,10 @@ pg_probackup backup -B backup_dir -b bac Specifies the backup mode to use. Possible values are: - - - - - FULL — creates a full backup that contains all the data - files of the cluster to be restored. - - - - - DELTA — reads all data files in the data directory and - creates an incremental backup for pages that have changed - since the previous backup. - - - - - PAGE — creates an incremental PAGE backup based on the WAL - files that have changed since the previous full or - incremental backup was taken. - - - - - PTRACK — creates an incremental PTRACK backup tracking - page changes on the fly. - - - + FULL, + DELTA, + PAGE, and + PTRACK. @@ -3976,7 +4491,7 @@ pg_probackup backup -B backup_dir -b bac Codestin Search App -pg_probackup restore -B backup_dir --instance instance_name +pg_probackup restore -B backup_dir --instance=instance_name [--help] [-D data_dir] [-i backup_id] [-j num_threads] [--progress] [-T OLDDIR=NEWDIR] [--external-mapping=OLDDIR=NEWDIR] [--skip-external-dirs] @@ -3985,6 +4500,7 @@ pg_probackup restore -B backup_dir --instance cmdline] [--primary-conninfo=primary_conninfo] [-S | --primary-slot-name=slot_name] +[-X wal_dir | --waldir=wal_dir] [recovery_target_options] [logging_options] [remote_options] [partial_restore_options] [remote_wal_archive_options] @@ -4152,6 +4668,17 @@ pg_probackup restore -B backup_dir --instance + + + + + + + Specifies the directory where WAL should be stored. 
+ + + + @@ -4175,7 +4702,7 @@ pg_probackup restore -B backup_dir --instance Codestin Search App pg_probackup checkdb -[-B backup_dir] [--instance instance_name] [-D data_dir] +[-B backup_dir] [--instance=instance_name] [-D data_dir] [--help] [-j num_threads] [--progress] [--amcheck [--skip-block-validation] [--checkunique] [--heapallindexed]] [connection_options] [logging_options] @@ -4265,7 +4792,7 @@ pg_probackup checkdb Codestin Search App pg_probackup validate -B backup_dir -[--help] [--instance instance_name] [-i backup_id] +[--help] [--instance=instance_name] [-i backup_id] [-j num_threads] [--progress] [--skip-block-validation] [recovery_target_options] [logging_options] @@ -4293,7 +4820,7 @@ pg_probackup validate -B backup_dir Codestin Search App -pg_probackup merge -B backup_dir --instance instance_name -i backup_id +pg_probackup merge -B backup_dir --instance=instance_name -i backup_id [--help] [-j num_threads] [--progress] [--no-validate] [--no-sync] [logging_options] @@ -4337,7 +4864,7 @@ pg_probackup merge -B backup_dir --instance Codestin Search App -pg_probackup delete -B backup_dir --instance instance_name +pg_probackup delete -B backup_dir --instance=instance_name [--help] [-j num_threads] [--progress] [--retention-redundancy=redundancy][--retention-window=window][--wal-depth=wal_depth] [--delete-wal] {-i backup_id | --delete-expired [--merge-expired] | --merge-expired | --status=backup_status} @@ -4384,7 +4911,7 @@ pg_probackup delete -B backup_dir --instance Codestin Search App -pg_probackup archive-push -B backup_dir --instance instance_name +pg_probackup archive-push -B backup_dir --instance=instance_name --wal-file-name=wal_file_name [--wal-file-path=wal_file_path] [--help] [--no-sync] [--compress] [--no-ready-rename] [--overwrite] [-j num_threads] [--batch-size=batch_size] @@ -4450,7 +4977,7 @@ pg_probackup archive-push -B backup_dir --instance Codestin Search App -pg_probackup archive-get -B backup_dir --instance instance_name --wal-file-path=wal_file_path --wal-file-name=wal_file_name +pg_probackup archive-get -B backup_dir --instance=instance_name --wal-file-path=wal_file_path --wal-file-name=wal_file_name [-j num_threads] [--batch-size=batch_size] [--prefetch-dir=prefetch_dir_path] [--no-validate-wal] [--help] [remote_options] [logging_options] @@ -4482,7 +5009,7 @@ pg_probackup archive-get -B backup_dir --instance catchup_mode --source-pgdata=path_to_pgdata_on_remote_server --destination-pgdata=path_to_local_dir -[--help] [-j | --threads=num_threads] [--stream] +[--help] [-j | --threads=num_threads] [--stream] [--dry-run] [--temp-slot] [-P | --perm-slot] [-S | --slot=slot_name] [--exclude-path=PATHNAME] [-T OLDDIR=NEWDIR] @@ -4492,41 +5019,16 @@ pg_probackup catchup -b catchup_mode Creates a copy of a PostgreSQL instance without using the backup catalog. - - - - - - - Specifies the catchup mode to use. Possible values are: - - - - - FULL — creates a full copy of the PostgreSQL instance. - - - - - DELTA — reads all data files in the data directory and - creates an incremental copy for pages that have changed - since the destination instance was shut down. - - - - - PTRACK — tracking page changes on the fly, - only reads and copies pages that have changed since the point of divergence - of the source and destination instances. - - - PTRACK catchup mode requires PTRACK - not earlier than 2.0 and hence, PostgreSQL not earlier than 11. - - - - - + + + + + + + Specifies the catchup mode to use. Possible values are: + FULL, + DELTA, and + PTRACK. 
@@ -4566,7 +5068,20 @@ pg_probackup catchup -b catchup_mode Copies the instance in STREAM WAL delivery mode, including all the necessary WAL files by streaming them from - the instance server via replication protocol. + the server via replication protocol. + + + + + + + + + Displays the total size of the files to be transferred by catchup. + This flag initiates a trial run of catchup, which does + not actually create, delete or move files on disk. WAL streaming is skipped with . + This flag also allows you to check that + all the options are correct and cloning/synchronising is ready to run. @@ -4591,17 +5106,6 @@ pg_probackup catchup -b catchup_mode - - - - - Copies the instance in STREAM WAL delivery mode, - including all the necessary WAL files by streaming them from - the instance server via replication protocol. - - - - @@ -4829,8 +5333,7 @@ pg_probackup catchup -b catchup_mode Specifies the LSN of the write-ahead log location up to which - recovery will proceed. Can be used only when restoring - a database cluster of major version 10 or higher. + recovery will proceed. @@ -4852,7 +5355,7 @@ pg_probackup catchup -b catchup_mode If the time zone offset is not specified, the local time zone is used. - Example: --recovery-target-time="2020-01-01 00:00:00+03" + Example: --recovery-target-time="2027-05-02 11:21:00+00" @@ -5042,7 +5545,7 @@ pg_probackup catchup -b catchup_mode If the time zone offset is not specified, the local time zone is used. - Example: --expire-time="2020-01-01 00:00:00+03" + Example: --expire-time="2027-05-02 11:21:00+00" @@ -5175,6 +5678,60 @@ pg_probackup catchup -b catchup_mode + + + + Defines the format of the console log. Only set from the command line. Note that you cannot + specify this option in the pg_probackup.conf configuration file through + the command and that the + command also treats this option specified in the configuration file as an error. + Possible values are: + + + + + plain — sets the plain-text format of the console log. + + + + + json — sets the JSON format of the console log. + + + + + + Default: plain + + + + + + + + + Defines the format of log files used. Possible values are: + + + + + plain — sets the plain-text format of log files. + + + + + json — sets the JSON format of log files. + + + + + + Default: plain + + + + + @@ -5738,350 +6295,9 @@ pg_probackup catchup -b catchup_mode - - Codestin Search App - - This section describes the options related to taking a backup - from standby. - - - - Starting from pg_probackup 2.0.24, backups can be - taken from standby without connecting to the master server, - so these options are no longer required. In lower versions, - pg_probackup had to connect to the master to determine - recovery time — the earliest moment for which you can - restore a consistent state of the database cluster. - - - - - - - - - Deprecated. Specifies the name of the database on the master - server to connect to. The connection is used only for managing - the backup process, so you can connect to any existing - database. Can be set in the pg_probackup.conf using the - command. - - - Default: postgres, the default PostgreSQL database - - - - - - - - - Deprecated. Specifies the host name of the system on which the - master server is running. - - - - - - - - - Deprecated. Specifies the TCP port or the local Unix domain - socket file extension on which the master server is listening - for connections. - - - Default: 5432, the PostgreSQL default port - - - - - - - - - Deprecated. User name to connect as. 
- - - Default: postgres, - the PostgreSQL default user name - - - - - - - - - - Deprecated. Wait time for WAL segment streaming via - replication, in seconds. By default, pg_probackup waits 300 - seconds. You can also define this parameter in the - pg_probackup.conf configuration file using the - command. - - - Default: 300 sec - - - - - - - - Codestin Search App - - All examples below assume the remote mode of operations via - SSH. If you are planning to run backup and - restore operation locally, skip the - Setup passwordless SSH connection step - and omit all options. - - - Examples are based on Ubuntu 18.04, - PostgreSQL 11, and pg_probackup - 2.2.0. - - - - - backupPostgreSQL - role used for connection to PostgreSQL - cluster. - - - - - backupdb — database used for connection - to PostgreSQL cluster. - - - - - backup_host — host with backup catalog. - - - - - backupman — user on - backup_host running all pg_probackup - operations. - - - - - /mnt/backups — directory on - backup_host where backup catalog is stored. - - - - - postgres_host — host with PostgreSQL - cluster. - - - - - postgres — user on - postgres_host that has started the PostgreSQL cluster. - - - - - /var/lib/postgresql/11/mainPostgreSQL - data directory on postgres_host. - - - - - Codestin Search App - - This scenario illustrates setting up standalone FULL and DELTA backups. - - - - Codestin Search App - -[backupman@backup_host] ssh-copy-id postgres@postgres_host - - - - Codestin Search App - - For security purposes, it is recommended to use a separate - database for backup operations. - - -postgres=# -CREATE DATABASE backupdb; - - - Connect to the backupdb database, create the - probackup role, and grant the following - permissions to this role: - - -backupdb=# -BEGIN; -CREATE ROLE backup WITH LOGIN REPLICATION; -GRANT USAGE ON SCHEMA pg_catalog TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; -GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO backup; -COMMIT; - - - - Codestin Search App - -[backupman@backup_host]$ pg_probackup-11 init -B /mnt/backups -INFO: Backup catalog '/mnt/backups' successfully inited - - - - Codestin Search App - -[backupman@backup_host]$ pg_probackup-11 add-instance -B /mnt/backups --instance pg-11 --remote-host=postgres_host --remote-user=postgres -D /var/lib/postgresql/11/main -INFO: Instance 'node' successfully inited - - - - Codestin Search App - -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b FULL --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb -INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YK2, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 -INFO: 
Start transferring data files -INFO: Data files are transferred -INFO: wait for pg_stop_backup() -INFO: pg_stop backup() successfully executed -INFO: Validating backup PZ7YK2 -INFO: Backup PZ7YK2 data files are valid -INFO: Backup PZ7YK2 resident size: 196MB -INFO: Backup PZ7YK2 completed - - - - Codestin Search App - -[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance pg-11 - -BACKUP INSTANCE 'pg-11' -================================================================================================================================== - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -================================================================================================================================== - node 11 PZ7YK2 2019-10-11 19:45:45+03 FULL STREAM 1/0 11s 180MB 16MB 1.00 0/3C000028 0/3C000198 OK - - - - Codestin Search App - -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b delta --stream --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb -INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YMP, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 -INFO: Parent backup: PZ7YK2 -INFO: Start transferring data files -INFO: Data files are transferred -INFO: wait for pg_stop_backup() -INFO: pg_stop backup() successfully executed -INFO: Validating backup PZ7YMP -INFO: Backup PZ7YMP data files are valid -INFO: Backup PZ7YMP resident size: 32MB -INFO: Backup PZ7YMP completed - - - - Codestin Search App - -[backupman@backup_host] pg_probackup-11 set-config -B /mnt/backups --instance pg-11 --remote-host=postgres_host --remote-user=postgres -U backup -d backupdb - - - - Codestin Search App - -[backupman@backup_host] pg_probackup-11 backup -B /mnt/backups --instance pg-11 -b delta --stream -INFO: Backup start, pg_probackup version: 2.2.0, instance: node, backup ID: PZ7YR5, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: none, compress-level: 1 -INFO: Parent backup: PZ7YMP -INFO: Start transferring data files -INFO: Data files are transferred -INFO: wait for pg_stop_backup() -INFO: pg_stop backup() successfully executed -INFO: Validating backup PZ7YR5 -INFO: Backup PZ7YR5 data files are valid -INFO: Backup PZ7YR5 resident size: 32MB -INFO: Backup PZ7YR5 completed - - - - Codestin Search App - -[backupman@backup_host] pg_probackup-11 show-config -B /mnt/backups --instance pg-11 - -# Backup instance information -pgdata = /var/lib/postgresql/11/main -system-identifier = 6746586934060931492 -xlog-seg-size = 16777216 -# Connection parameters -pgdatabase = backupdb -pghost = postgres_host -pguser = backup -# Replica parameters -replica-timeout = 5min -# Archive parameters -archive-timeout = 5min -# Logging parameters -log-level-console = INFO -log-level-file = OFF -log-filename = pg_probackup.log -log-rotation-size = 0 -log-rotation-age = 0 -# Retention parameters -retention-redundancy = 0 -retention-window = 0 -wal-depth = 0 -# Compression parameters -compress-algorithm = none -compress-level = 1 -# Remote access parameters -remote-proto = ssh -remote-host = postgres_host - - - Note that we are getting the default values for other options - that were not overwritten by the set-config command. 
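        As a purely illustrative follow-up, any of these defaults could be overridden with another set-config call and re-checked with show-config; the retention values below are arbitrary placeholders:

# Override two retention defaults, then re-read the stored configuration.
[backupman@backup_host] pg_probackup-11 set-config -B /mnt/backups --instance pg-11 --retention-window=7 --retention-redundancy=2
[backupman@backup_host] pg_probackup-11 show-config -B /mnt/backups --instance pg-11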
- - - - Codestin Search App - -[backupman@backup_host] pg_probackup-11 show -B /mnt/backups --instance pg-11 - -==================================================================================================================================== - Instance Version ID Recovery Time Mode WAL Mode TLI Time Data WAL Zratio Start LSN Stop LSN Status -==================================================================================================================================== - node 11 PZ7YR5 2019-10-11 19:49:56+03 DELTA STREAM 1/1 10s 112kB 32MB 1.00 0/41000028 0/41000160 OK - node 11 PZ7YMP 2019-10-11 19:47:16+03 DELTA STREAM 1/1 10s 376kB 32MB 1.00 0/3E000028 0/3F0000B8 OK - node 11 PZ7YK2 2019-10-11 19:45:45+03 FULL STREAM 1/0 11s 180MB 16MB 1.00 0/3C000028 0/3C000198 OK - - - - - - Codestin Search App diff --git a/doc/stylesheet.css b/doc/stylesheet.css index 4d84058f5..31464154b 100644 --- a/doc/stylesheet.css +++ b/doc/stylesheet.css @@ -119,7 +119,8 @@ body { } .book code, kbd, pre, samp { - font-family: monospace,monospace; + font-family: monospace,monospace; + font-size: 90%; } .book .txtCommentsWrap { diff --git a/gen_probackup_project.pl b/gen_probackup_project.pl index c24db1228..8143b7d0d 100644 --- a/gen_probackup_project.pl +++ b/gen_probackup_project.pl @@ -13,11 +13,11 @@ BEGIN { $pgsrc = shift @ARGV; if($pgsrc eq "--help"){ - print STDERR "Usage $0 pg-source-dir \n"; - print STDERR "Like this: \n"; - print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro \n"; - print STDERR "May be need input this before: \n"; - print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio 12.0\\VC\\vcvarsall\" amd64\n"; + print STDERR "Usage $0 pg-source-dir\n"; + print STDERR "Like this:\n"; + print STDERR "$0 C:/PgProject/postgresql.10dev/postgrespro\n"; + print STDERR "May need to run this first:\n"; + print STDERR "CALL \"C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat\" amd64\n"; exit 1; } } @@ -133,7 +133,7 @@ sub build_pgprobackup unless (-d 'src/tools/msvc' && -d 'src'); # my $vsVersion = DetermineVisualStudioVersion(); - my $vsVersion = '12.00'; + my $vsVersion = '16.00'; $solution = CreateSolution($vsVersion, $config); diff --git a/nls.mk b/nls.mk new file mode 100644 index 000000000..981c1c4fe --- /dev/null +++ b/nls.mk @@ -0,0 +1,6 @@ +# contrib/pg_probackup/nls.mk +CATALOG_NAME = pg_probackup +AVAIL_LANGUAGES = ru +GETTEXT_FILES = src/help.c +GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) +GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS) diff --git a/packaging/Makefile.test b/packaging/Makefile.test index f5e004f01..11c63619a 100644 --- a/packaging/Makefile.test +++ b/packaging/Makefile.test @@ -130,10 +130,10 @@ build/test_suse: build/test_suse_15.1 build/test_suse_15.2 @echo Suse: done build/test_suse_15.1: build/test_suse_15.1_9.6 build/test_suse_15.1_10 build/test_suse_15.1_11 build/test_suse_15.1_12 build/test_suse_15.1_13 - @echo Rhel 15.1: done + @echo Suse 15.1: done build/test_suse_15.2: build/test_suse_15.2_9.6 build/test_suse_15.2_10 build/test_suse_15.2_11 build/test_suse_15.2_12 build/test_suse_15.2_13 build/test_suse_15.2_14 - @echo Rhel 15.1: done + @echo Suse 15.2: done define test_suse docker rm -f $1_$2_probackup_$(PKG_NAME_SUFFIX)$(PBK_VERSION) >> /dev/null 2>&1 ; \ diff --git a/packaging/pkg/scripts/rpm.sh b/packaging/pkg/scripts/rpm.sh index d03915c20..2fec4a700 100755 --- a/packaging/pkg/scripts/rpm.sh +++ b/packaging/pkg/scripts/rpm.sh @@ -20,7 +20,15 @@ ulimit -n 1024 if [ 
${DISTRIB} = 'centos' ] ; then sed -i 's|^baseurl=http://|baseurl=https://|g' /etc/yum.repos.d/*.repo + if [ ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi yum update -y + if [ ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi # PACKAGES NEEDED diff --git a/packaging/test/scripts/rpm.sh b/packaging/test/scripts/rpm.sh index 92804a7f4..87d430ef8 100755 --- a/packaging/test/scripts/rpm.sh +++ b/packaging/test/scripts/rpm.sh @@ -15,7 +15,16 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then # update of rpm package is broken in rhel-7 (26/12/2022) - yum update -y + #yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi + yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi # yum upgrade -y || echo 'some packages in docker failed to upgrade' # yum install -y sudo @@ -68,6 +77,12 @@ if [ ${DISTRIB} == 'centos' ] && [ ${DISTRIB_VERSION} == '8' ]; then dnf -qy module disable postgresql fi +# PGDG doesn't support install of PG-9.6 from repo package anymore +if [ ${PG_VERSION} == '9.6' ] && [ ${DISTRIB_VERSION} == '7' ]; then + # ugly hack: use repo settings from PG10 + sed -i 's/10/9.6/' /etc/yum.repos.d/pgdg-redhat-all.repo +fi + yum install -y postgresql${PG_TOG}-server.x86_64 export PGDATA=/var/lib/pgsql/${PG_VERSION}/data diff --git a/packaging/test/scripts/rpm_forks.sh b/packaging/test/scripts/rpm_forks.sh index 0d72040ed..d57711697 100755 --- a/packaging/test/scripts/rpm_forks.sh +++ b/packaging/test/scripts/rpm_forks.sh @@ -15,7 +15,15 @@ PG_TOG=$(echo $PG_VERSION | sed 's|\.||g') if [ ${DISTRIB} != 'rhel' -o ${DISTRIB_VERSION} != '7' ]; then # update of rpm package is broken in rhel-7 (26/12/2022) + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi yum update -y + if [ ${DISTRIB} = 'centos' -a ${DISTRIB_VERSION} = '8' ]; then + sed -i 's|mirrorlist|#mirrorlist|g' /etc/yum.repos.d/CentOS-*.repo + sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*.repo + fi fi if [ ${PBK_EDITION} == 'ent' ]; then @@ -80,11 +88,13 @@ if [ $PBK_EDITION == 'std' ] ; then # install POSTGRESQL # rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-${DISTRIB_VERSION}-x86_64/pgdg-redhat-repo-latest.noarch.rpm - if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then - rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm - else - rpm -ivh 
https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm - fi + #if [[ ${PG_VERSION} == '11' ]] || [[ ${PG_VERSION} == '12' ]]; then + # rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm + #else + # rpm -ivh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/postgrespro-std-${PG_VERSION}.${DISTRIB}.yum-${PG_VERSION}-0.3.noarch.rpm + #fi + curl -o pgpro-repo-add.sh https://repo.postgrespro.ru/pgpro-${PG_VERSION}/keys/pgpro-repo-add.sh + sh pgpro-repo-add.sh if [[ ${PG_VERSION} == '9.6' ]]; then yum install -y postgrespro${PG_TOG}-server.x86_64 diff --git a/po/LINGUAS b/po/LINGUAS new file mode 100644 index 000000000..562ba4cf0 --- /dev/null +++ b/po/LINGUAS @@ -0,0 +1 @@ +ru diff --git a/po/ru.po b/po/ru.po new file mode 100644 index 000000000..1263675c2 --- /dev/null +++ b/po/ru.po @@ -0,0 +1,1880 @@ +# Russian message translation file for pg_probackup +# Copyright (C) 2022 PostgreSQL Global Development Group +# This file is distributed under the same license as the pg_probackup (PostgreSQL) package. +# Vyacheslav Makarov , 2022. +msgid "" +msgstr "" +"Project-Id-Version: pg_probackup (PostgreSQL)\n" +"Report-Msgid-Bugs-To: bugs@postgrespro.ru\n" +"POT-Creation-Date: 2022-04-08 11:33+0300\n" +"PO-Revision-Date: 2022-MO-DA HO:MI+ZONE\n" +"Last-Translator: Vyacheslav Makarov \n" +"Language-Team: Russian \n" +"Language: ru\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" +"%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" + +#: src/help.c:84 +#, c-format +msgid "" +"\n" +"%s - utility to manage backup/recovery of PostgreSQL database.\n" +msgstr "" +"\n" +"%s - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL.\n" + +#: src/help.c:86 +#, c-format +msgid "" +"\n" +" %s help [COMMAND]\n" +msgstr "" + +#: src/help.c:88 +#, c-format +msgid "" +"\n" +" %s version\n" +msgstr "" + +#: src/help.c:90 +#, c-format +msgid "" +"\n" +" %s init -B backup-path\n" +msgstr "" + +#: src/help.c:92 +#, c-format +msgid "" +"\n" +" %s set-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:93 src/help.c:791 +#, c-format +msgid " [-D pgdata-path]\n" +msgstr "" + +#: src/help.c:94 src/help.c:130 src/help.c:218 +#, c-format +msgid " [--external-dirs=external-directories-paths]\n" +msgstr "" + +#: src/help.c:95 src/help.c:132 src/help.c:305 src/help.c:731 src/help.c:794 +#, c-format +msgid " [--log-level-console=log-level-console]\n" +msgstr "" + +#: src/help.c:96 src/help.c:133 src/help.c:306 src/help.c:732 src/help.c:795 +#, c-format +msgid " [--log-level-file=log-level-file]\n" +msgstr "" + +#: src/help.c:97 src/help.c:134 src/help.c:307 src/help.c:733 src/help.c:796 +#, c-format +msgid " [--log-filename=log-filename]\n" +msgstr "" + +#: src/help.c:98 src/help.c:135 src/help.c:308 src/help.c:734 src/help.c:797 +#, c-format +msgid " [--error-log-filename=error-log-filename]\n" +msgstr "" + +#: src/help.c:99 src/help.c:136 src/help.c:309 src/help.c:735 src/help.c:798 +#, c-format +msgid " [--log-directory=log-directory]\n" +msgstr "" + +#: src/help.c:100 src/help.c:137 src/help.c:310 src/help.c:736 src/help.c:799 +#, c-format +msgid " [--log-rotation-size=log-rotation-size]\n" +msgstr "" + +#: src/help.c:101 src/help.c:800 +#, c-format +msgid " 
[--log-rotation-age=log-rotation-age]\n" +msgstr "" + +#: src/help.c:102 src/help.c:140 src/help.c:203 src/help.c:313 src/help.c:674 +#: src/help.c:801 +#, c-format +msgid " [--retention-redundancy=retention-redundancy]\n" +msgstr "" + +#: src/help.c:103 src/help.c:141 src/help.c:204 src/help.c:314 src/help.c:675 +#: src/help.c:802 +#, c-format +msgid " [--retention-window=retention-window]\n" +msgstr "" + +#: src/help.c:104 src/help.c:142 src/help.c:205 src/help.c:315 src/help.c:676 +#: src/help.c:803 +#, c-format +msgid " [--wal-depth=wal-depth]\n" +msgstr "" + +#: src/help.c:105 src/help.c:144 src/help.c:235 src/help.c:317 src/help.c:804 +#: src/help.c:948 +#, c-format +msgid " [--compress-algorithm=compress-algorithm]\n" +msgstr "" + +#: src/help.c:106 src/help.c:145 src/help.c:236 src/help.c:318 src/help.c:805 +#: src/help.c:949 +#, c-format +msgid " [--compress-level=compress-level]\n" +msgstr "" + +#: src/help.c:107 src/help.c:232 src/help.c:806 src/help.c:945 +#, c-format +msgid " [--archive-timeout=timeout]\n" +msgstr "" + +#: src/help.c:108 src/help.c:147 src/help.c:259 src/help.c:320 src/help.c:807 +#: src/help.c:1045 +#, c-format +msgid " [-d dbname] [-h host] [-p port] [-U username]\n" +msgstr "" + +#: src/help.c:109 src/help.c:149 src/help.c:174 src/help.c:219 src/help.c:237 +#: src/help.c:247 src/help.c:261 src/help.c:322 src/help.c:449 src/help.c:808 +#: src/help.c:906 src/help.c:950 src/help.c:994 src/help.c:1047 +#, c-format +msgid " [--remote-proto] [--remote-host]\n" +msgstr "" + +#: src/help.c:110 src/help.c:150 src/help.c:175 src/help.c:220 src/help.c:238 +#: src/help.c:248 src/help.c:262 src/help.c:323 src/help.c:450 src/help.c:809 +#: src/help.c:907 src/help.c:951 src/help.c:995 src/help.c:1048 +#, c-format +msgid " [--remote-port] [--remote-path] [--remote-user]\n" +msgstr "" + +#: src/help.c:111 src/help.c:151 src/help.c:176 src/help.c:221 src/help.c:239 +#: src/help.c:249 src/help.c:263 src/help.c:324 src/help.c:451 src/help.c:1049 +#, c-format +msgid " [--ssh-options]\n" +msgstr "" + +#: src/help.c:112 +#, c-format +msgid " [--restore-command=cmdline] [--archive-host=destination]\n" +msgstr "" + +#: src/help.c:113 src/help.c:178 +#, c-format +msgid " [--archive-port=port] [--archive-user=username]\n" +msgstr "" + +#: src/help.c:114 src/help.c:119 src/help.c:123 src/help.c:153 src/help.c:179 +#: src/help.c:188 src/help.c:194 src/help.c:209 src/help.c:214 src/help.c:222 +#: src/help.c:226 src/help.c:240 src/help.c:250 src/help.c:264 +#, c-format +msgid " [--help]\n" +msgstr "" + +#: src/help.c:116 +#, c-format +msgid "" +"\n" +" %s set-backup -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:117 +#, c-format +msgid " -i backup-id [--ttl=interval] [--expire-time=timestamp]\n" +msgstr "" + +#: src/help.c:118 +#, c-format +msgid " [--note=text]\n" +msgstr "" + +#: src/help.c:121 +#, c-format +msgid "" +"\n" +" %s show-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:122 +#, c-format +msgid " [--format=format]\n" +msgstr "" + +#: src/help.c:125 +#, c-format +msgid "" +"\n" +" %s backup -B backup-path -b backup-mode --instance=instance_name\n" +msgstr "" + +#: src/help.c:126 src/help.c:299 +#, c-format +msgid " [-D pgdata-path] [-C]\n" +msgstr "" + +#: src/help.c:127 src/help.c:300 +#, c-format +msgid " [--stream [-S slot-name] [--temp-slot]]\n" +msgstr "" + +#: src/help.c:128 src/help.c:301 +#, c-format +msgid " [--backup-pg-log] [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:129 src/help.c:168 
src/help.c:302 src/help.c:433 +#, c-format +msgid " [--no-validate] [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:131 src/help.c:304 +#, c-format +msgid " [--no-sync]\n" +msgstr "" + +#: src/help.c:138 src/help.c:311 +#, c-format +msgid " [--log-rotation-age=log-rotation-age] [--no-color]\n" +msgstr "" + +#: src/help.c:139 src/help.c:312 +#, c-format +msgid " [--delete-expired] [--delete-wal] [--merge-expired]\n" +msgstr "" + +#: src/help.c:143 src/help.c:316 +#, c-format +msgid " [--compress]\n" +msgstr "" + +#: src/help.c:146 src/help.c:319 +#, c-format +msgid " [--archive-timeout=archive-timeout]\n" +msgstr "" + +#: src/help.c:148 src/help.c:260 src/help.c:321 src/help.c:1046 +#, c-format +msgid " [-w --no-password] [-W --password]\n" +msgstr "" + +#: src/help.c:152 +#, c-format +msgid " [--ttl=interval] [--expire-time=timestamp] [--note=text]\n" +msgstr "" + +#: src/help.c:156 +#, c-format +msgid "" +"\n" +" %s restore -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:157 src/help.c:431 +#, c-format +msgid " [-D pgdata-path] [-i backup-id] [-j num-threads]\n" +msgstr "" + +#: src/help.c:158 src/help.c:183 src/help.c:439 src/help.c:552 +#, c-format +msgid " [--recovery-target-time=time|--recovery-target-xid=xid\n" +msgstr "" + +#: src/help.c:159 src/help.c:184 src/help.c:440 src/help.c:553 +#, c-format +msgid " |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n" +msgstr "" + +#: src/help.c:160 src/help.c:185 src/help.c:441 src/help.c:554 +#, c-format +msgid " [--recovery-target-timeline=timeline]\n" +msgstr "" + +#: src/help.c:161 src/help.c:442 +#, c-format +msgid " [--recovery-target=immediate|latest]\n" +msgstr "" + +#: src/help.c:162 src/help.c:186 src/help.c:443 src/help.c:555 +#, c-format +msgid " [--recovery-target-name=target-name]\n" +msgstr "" + +#: src/help.c:163 src/help.c:444 +#, c-format +msgid " [--recovery-target-action=pause|promote|shutdown]\n" +msgstr "" + +#: src/help.c:164 src/help.c:445 src/help.c:793 +#, c-format +msgid " [--restore-command=cmdline]\n" +msgstr "" + +#: src/help.c:165 +#, c-format +msgid " [-R | --restore-as-replica] [--force]\n" +msgstr "" + +#: src/help.c:166 src/help.c:447 +#, c-format +msgid " [--primary-conninfo=primary_conninfo]\n" +msgstr "" + +#: src/help.c:167 src/help.c:448 +#, c-format +msgid " [-S | --primary-slot-name=slotname]\n" +msgstr "" + +#: src/help.c:169 +#, c-format +msgid " [-T OLDDIR=NEWDIR] [--progress]\n" +msgstr "" + +#: src/help.c:170 src/help.c:435 +#, c-format +msgid " [--external-mapping=OLDDIR=NEWDIR]\n" +msgstr "" + +#: src/help.c:171 +#, c-format +msgid " [--skip-external-dirs] [--no-sync]\n" +msgstr "" + +#: src/help.c:172 src/help.c:437 +#, c-format +msgid " [-I | --incremental-mode=none|checksum|lsn]\n" +msgstr "" + +#: src/help.c:173 +#, c-format +msgid " [--db-include | --db-exclude]\n" +msgstr "" + +#: src/help.c:177 +#, c-format +msgid " [--archive-host=hostname]\n" +msgstr "" + +#: src/help.c:181 +#, c-format +msgid "" +"\n" +" %s validate -B backup-path [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:182 src/help.c:551 +#, c-format +msgid " [-i backup-id] [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:187 +#, c-format +msgid " [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:190 +#, c-format +msgid "" +"\n" +" %s checkdb [-B backup-path] [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:191 +#, c-format +msgid " [-D pgdata-path] [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:192 src/help.c:603 +#, 
c-format +msgid " [--amcheck] [--skip-block-validation]\n" +msgstr "" + +#: src/help.c:193 +#, c-format +msgid " [--heapallindexed] [--checkunique]\n" +msgstr "" + +#: src/help.c:196 +#, c-format +msgid "" +"\n" +" %s show -B backup-path\n" +msgstr "" + +#: src/help.c:197 src/help.c:657 +#, c-format +msgid " [--instance=instance_name [-i backup-id]]\n" +msgstr "" + +#: src/help.c:198 +#, c-format +msgid " [--format=format] [--archive]\n" +msgstr "" + +#: src/help.c:199 +#, c-format +msgid " [--no-color] [--help]\n" +msgstr "" + +#: src/help.c:201 +#, c-format +msgid "" +"\n" +" %s delete -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:202 src/help.c:673 +#, c-format +msgid " [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:206 +#, c-format +msgid " [-i backup-id | --delete-expired | --merge-expired | --status=backup_status]\n" +msgstr "" + +#: src/help.c:207 +#, c-format +msgid " [--delete-wal]\n" +msgstr "" + +#: src/help.c:208 +#, c-format +msgid " [--dry-run] [--no-validate] [--no-sync]\n" +msgstr "" + +#: src/help.c:211 +#, c-format +msgid "" +"\n" +" %s merge -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:212 +#, c-format +msgid " -i backup-id [--progress] [-j num-threads]\n" +msgstr "" + +#: src/help.c:213 src/help.c:730 +#, c-format +msgid " [--no-validate] [--no-sync]\n" +msgstr "" + +#: src/help.c:216 +#, c-format +msgid "" +"\n" +" %s add-instance -B backup-path -D pgdata-path\n" +msgstr "" + +#: src/help.c:217 src/help.c:225 src/help.c:904 +#, c-format +msgid " --instance=instance_name\n" +msgstr "" + +#: src/help.c:224 +#, c-format +msgid "" +"\n" +" %s del-instance -B backup-path\n" +msgstr "" + +#: src/help.c:228 +#, c-format +msgid "" +"\n" +" %s archive-push -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:229 src/help.c:244 src/help.c:942 src/help.c:990 +#, c-format +msgid " --wal-file-name=wal-file-name\n" +msgstr "" + +#: src/help.c:230 src/help.c:943 src/help.c:991 +#, c-format +msgid " [--wal-file-path=wal-file-path]\n" +msgstr "" + +#: src/help.c:231 src/help.c:245 src/help.c:944 src/help.c:992 +#, c-format +msgid " [-j num-threads] [--batch-size=batch_size]\n" +msgstr "" + +#: src/help.c:233 src/help.c:946 +#, c-format +msgid " [--no-ready-rename] [--no-sync]\n" +msgstr "" + +#: src/help.c:234 src/help.c:947 +#, c-format +msgid " [--overwrite] [--compress]\n" +msgstr "" + +#: src/help.c:242 +#, c-format +msgid "" +"\n" +" %s archive-get -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:243 +#, c-format +msgid " --wal-file-path=wal-file-path\n" +msgstr "" + +#: src/help.c:246 src/help.c:993 +#, c-format +msgid " [--no-validate-wal]\n" +msgstr "" + +#: src/help.c:252 +#, c-format +msgid "" +"\n" +" %s catchup -b catchup-mode\n" +msgstr "" + +#: src/help.c:253 src/help.c:1039 +#, c-format +msgid " --source-pgdata=path_to_pgdata_on_remote_server\n" +msgstr "" + +#: src/help.c:254 src/help.c:1040 +#, c-format +msgid " --destination-pgdata=path_to_local_dir\n" +msgstr "" + +#: src/help.c:255 +#, c-format +msgid " [--stream [-S slot-name] [--temp-slot | --perm-slot]]\n" +msgstr "" + +#: src/help.c:256 src/help.c:1042 +#, c-format +msgid " [-j num-threads]\n" +msgstr "" + +#: src/help.c:257 src/help.c:434 src/help.c:1043 +#, c-format +msgid " [-T OLDDIR=NEWDIR]\n" +msgstr "" + +#: src/help.c:258 src/help.c:1044 +#, c-format +msgid " [--exclude-path=path_prefix]\n" +msgstr "" + +#: src/help.c:270 +#, c-format +msgid "Read the website for details <%s>.\n" +msgstr "Подробнее 
читайте на сайте <%s>.\n" + +#: src/help.c:272 +#, c-format +msgid "Report bugs to <%s>.\n" +msgstr "Сообщайте об ошибках в <%s>.\n" + +#: src/help.c:279 +#, c-format +msgid "" +"\n" +"Unknown command. Try pg_probackup help\n" +"\n" +msgstr "" +"\n" +"Неизвестная команда. Попробуйте pg_probackup help\n" +"\n" + +#: src/help.c:285 +#, c-format +msgid "" +"\n" +"This command is intended for internal use\n" +"\n" +msgstr "" + +#: src/help.c:291 +#, c-format +msgid "" +"\n" +"%s init -B backup-path\n" +"\n" +msgstr "" + +#: src/help.c:292 +#, c-format +msgid "" +" -B, --backup-path=backup-path location of the backup storage area\n" +"\n" +msgstr "" + +#: src/help.c:298 +#, c-format +msgid "" +"\n" +"%s backup -B backup-path -b backup-mode --instance=instance_name\n" +msgstr "" + +#: src/help.c:303 src/help.c:792 +#, c-format +msgid " [-E external-directories-paths]\n" +msgstr "" + +#: src/help.c:325 +#, c-format +msgid "" +" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n" +"\n" +msgstr "" + +#: src/help.c:327 src/help.c:455 src/help.c:558 src/help.c:606 src/help.c:660 +#: src/help.c:679 src/help.c:739 src/help.c:812 src/help.c:895 src/help.c:910 +#: src/help.c:934 src/help.c:954 src/help.c:998 +#, c-format +msgid " -B, --backup-path=backup-path location of the backup storage area\n" +msgstr "" + +#: src/help.c:328 +#, c-format +msgid " -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n" +msgstr "" + +#: src/help.c:329 src/help.c:456 src/help.c:559 src/help.c:607 src/help.c:680 +#: src/help.c:740 src/help.c:813 src/help.c:896 +#, c-format +msgid " --instance=instance_name name of the instance\n" +msgstr "" + +#: src/help.c:330 src/help.c:458 src/help.c:608 src/help.c:814 src/help.c:911 +#, c-format +msgid " -D, --pgdata=pgdata-path location of the database storage area\n" +msgstr "" + +#: src/help.c:331 +#, c-format +msgid " -C, --smooth-checkpoint do smooth checkpoint before backup\n" +msgstr "" + +#: src/help.c:332 +#, c-format +msgid " --stream stream the transaction log and include it in the backup\n" +msgstr "" + +#: src/help.c:333 src/help.c:1054 +#, c-format +msgid " -S, --slot=SLOTNAME replication slot to use\n" +msgstr "" + +#: src/help.c:334 src/help.c:1055 +#, c-format +msgid " --temp-slot use temporary replication slot\n" +msgstr "" + +#: src/help.c:335 +#, c-format +msgid " --backup-pg-log backup of '%s' directory\n" +msgstr "" + +#: src/help.c:336 src/help.c:460 src/help.c:563 src/help.c:611 src/help.c:682 +#: src/help.c:743 src/help.c:960 src/help.c:1004 src/help.c:1058 +#, c-format +msgid " -j, --threads=NUM number of parallel threads\n" +msgstr "" + +#: src/help.c:337 src/help.c:462 src/help.c:562 src/help.c:610 src/help.c:683 +#: src/help.c:744 +#, c-format +msgid " --progress show progress\n" +msgstr "" + +#: src/help.c:338 +#, c-format +msgid " --no-validate disable validation after backup\n" +msgstr "" + +#: src/help.c:339 src/help.c:466 src/help.c:573 +#, c-format +msgid " --skip-block-validation set to validate only file-level checksum\n" +msgstr "" + +#: src/help.c:340 src/help.c:815 src/help.c:914 +#, c-format +msgid " -E --external-dirs=external-directories-paths\n" +msgstr "" + +#: src/help.c:341 src/help.c:816 src/help.c:915 +#, c-format +msgid " backup some directories not from pgdata \n" +msgstr "" + +#: src/help.c:342 src/help.c:817 src/help.c:916 +#, c-format +msgid " (example: --external-dirs=/tmp/dir1:/tmp/dir2)\n" +msgstr "" + +#: src/help.c:343 +#, c-format +msgid " --no-sync do not sync backed up files to disk\n" +msgstr "" + +#: 
src/help.c:344 +#, c-format +msgid " --note=text add note to backup\n" +msgstr "" + +#: src/help.c:345 src/help.c:784 +#, c-format +msgid " (example: --note='backup before app update to v13.1')\n" +msgstr "" + +#: src/help.c:347 src/help.c:508 src/help.c:575 src/help.c:622 src/help.c:702 +#: src/help.c:748 src/help.c:820 +#, c-format +msgid "" +"\n" +" Logging options:\n" +msgstr "" + +#: src/help.c:348 src/help.c:509 src/help.c:576 src/help.c:623 src/help.c:703 +#: src/help.c:749 src/help.c:821 +#, c-format +msgid " --log-level-console=log-level-console\n" +msgstr "" + +#: src/help.c:349 src/help.c:510 src/help.c:577 src/help.c:624 src/help.c:704 +#: src/help.c:750 src/help.c:822 +#, c-format +msgid " level for console logging (default: info)\n" +msgstr "" + +#: src/help.c:350 src/help.c:353 src/help.c:511 src/help.c:514 src/help.c:578 +#: src/help.c:581 src/help.c:625 src/help.c:628 src/help.c:705 src/help.c:708 +#: src/help.c:751 src/help.c:754 src/help.c:823 src/help.c:826 +#, c-format +msgid " available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n" +msgstr "" + +#: src/help.c:351 src/help.c:512 src/help.c:579 src/help.c:626 src/help.c:706 +#: src/help.c:752 src/help.c:824 +#, c-format +msgid " --log-level-file=log-level-file\n" +msgstr "" + +#: src/help.c:352 src/help.c:513 src/help.c:580 src/help.c:627 src/help.c:707 +#: src/help.c:753 src/help.c:825 +#, c-format +msgid " level for file logging (default: off)\n" +msgstr "" + +#: src/help.c:354 src/help.c:515 src/help.c:582 src/help.c:629 src/help.c:709 +#: src/help.c:755 src/help.c:827 +#, c-format +msgid " --log-filename=log-filename\n" +msgstr "" + +#: src/help.c:355 src/help.c:516 src/help.c:583 src/help.c:630 src/help.c:710 +#: src/help.c:756 src/help.c:828 +#, c-format +msgid " filename for file logging (default: 'pg_probackup.log')\n" +msgstr "" + +#: src/help.c:356 src/help.c:517 src/help.c:584 src/help.c:711 src/help.c:757 +#: src/help.c:829 +#, c-format +msgid " support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n" +msgstr "" + +#: src/help.c:357 src/help.c:518 src/help.c:585 src/help.c:632 src/help.c:712 +#: src/help.c:758 src/help.c:830 +#, c-format +msgid " --error-log-filename=error-log-filename\n" +msgstr "" + +#: src/help.c:358 src/help.c:519 src/help.c:586 src/help.c:633 src/help.c:713 +#: src/help.c:759 src/help.c:831 +#, c-format +msgid " filename for error logging (default: none)\n" +msgstr "" + +#: src/help.c:359 src/help.c:520 src/help.c:587 src/help.c:634 src/help.c:714 +#: src/help.c:760 src/help.c:832 +#, c-format +msgid " --log-directory=log-directory\n" +msgstr "" + +#: src/help.c:360 src/help.c:521 src/help.c:588 src/help.c:635 src/help.c:715 +#: src/help.c:761 src/help.c:833 +#, c-format +msgid " directory for file logging (default: BACKUP_PATH/log)\n" +msgstr "" + +#: src/help.c:361 src/help.c:522 src/help.c:589 src/help.c:636 src/help.c:716 +#: src/help.c:762 src/help.c:834 +#, c-format +msgid " --log-rotation-size=log-rotation-size\n" +msgstr "" + +#: src/help.c:362 src/help.c:523 src/help.c:590 src/help.c:637 src/help.c:717 +#: src/help.c:763 src/help.c:835 +#, c-format +msgid " rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:363 src/help.c:524 src/help.c:591 src/help.c:638 src/help.c:718 +#: src/help.c:764 src/help.c:836 +#, c-format +msgid " available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n" +msgstr "" + +#: src/help.c:364 src/help.c:525 src/help.c:592 src/help.c:639 src/help.c:719 +#: src/help.c:765 
src/help.c:837 +#, c-format +msgid " --log-rotation-age=log-rotation-age\n" +msgstr "" + +#: src/help.c:365 src/help.c:526 src/help.c:593 src/help.c:640 src/help.c:720 +#: src/help.c:766 src/help.c:838 +#, c-format +msgid " rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:366 src/help.c:527 src/help.c:594 src/help.c:641 src/help.c:721 +#: src/help.c:767 src/help.c:839 +#, c-format +msgid " available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n" +msgstr "" + +#: src/help.c:367 src/help.c:528 src/help.c:642 +#, c-format +msgid " --no-color disable the coloring of error and warning console messages\n" +msgstr "" + +#: src/help.c:369 src/help.c:687 src/help.c:841 +#, c-format +msgid "" +"\n" +" Retention options:\n" +msgstr "" + +#: src/help.c:370 src/help.c:688 +#, c-format +msgid " --delete-expired delete backups expired according to current\n" +msgstr "" + +#: src/help.c:371 src/help.c:373 +#, c-format +msgid " retention policy after successful backup completion\n" +msgstr "" + +#: src/help.c:372 src/help.c:690 +#, c-format +msgid " --merge-expired merge backups expired according to current\n" +msgstr "" + +#: src/help.c:374 src/help.c:692 +#, c-format +msgid " --delete-wal remove redundant files in WAL archive\n" +msgstr "" + +#: src/help.c:375 src/help.c:693 src/help.c:842 +#, c-format +msgid " --retention-redundancy=retention-redundancy\n" +msgstr "" + +#: src/help.c:376 src/help.c:694 src/help.c:843 +#, c-format +msgid " number of full backups to keep; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:377 src/help.c:695 src/help.c:844 +#, c-format +msgid " --retention-window=retention-window\n" +msgstr "" + +#: src/help.c:378 src/help.c:696 src/help.c:845 +#, c-format +msgid " number of days of recoverability; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:379 src/help.c:697 +#, c-format +msgid " --wal-depth=wal-depth number of latest valid backups per timeline that must\n" +msgstr "" + +#: src/help.c:380 src/help.c:698 +#, c-format +msgid " retain the ability to perform PITR; 0 disables; (default: 0)\n" +msgstr "" + +#: src/help.c:381 src/help.c:699 +#, c-format +msgid " --dry-run perform a trial run without any changes\n" +msgstr "" + +#: src/help.c:383 +#, c-format +msgid "" +"\n" +" Pinning options:\n" +msgstr "" + +#: src/help.c:384 src/help.c:778 +#, c-format +msgid " --ttl=interval pin backup for specified amount of time; 0 unpin\n" +msgstr "" + +#: src/help.c:385 src/help.c:779 +#, c-format +msgid " available units: 'ms', 's', 'min', 'h', 'd' (default: s)\n" +msgstr "" + +#: src/help.c:386 src/help.c:780 +#, c-format +msgid " (example: --ttl=20d)\n" +msgstr "" + +#: src/help.c:387 src/help.c:781 +#, c-format +msgid " --expire-time=time pin backup until specified time stamp\n" +msgstr "" + +#: src/help.c:388 src/help.c:782 +#, c-format +msgid " (example: --expire-time='2024-01-01 00:00:00+03')\n" +msgstr "" + +#: src/help.c:390 src/help.c:849 src/help.c:967 +#, c-format +msgid "" +"\n" +" Compression options:\n" +msgstr "" + +#: src/help.c:391 src/help.c:850 src/help.c:968 +#, c-format +msgid " --compress alias for --compress-algorithm='zlib' and --compress-level=1\n" +msgstr "" + +#: src/help.c:392 src/help.c:851 src/help.c:969 +#, c-format +msgid " --compress-algorithm=compress-algorithm\n" +msgstr "" + +#: src/help.c:393 +#, c-format +msgid " available options: 'zlib', 'pglz', 'none' (default: none)\n" +msgstr "" + +#: src/help.c:394 src/help.c:853 src/help.c:971 +#, c-format +msgid " 
--compress-level=compress-level\n" +msgstr "" + +#: src/help.c:395 src/help.c:854 src/help.c:972 +#, c-format +msgid " level of compression [0-9] (default: 1)\n" +msgstr "" + +#: src/help.c:397 src/help.c:856 +#, c-format +msgid "" +"\n" +" Archive options:\n" +msgstr "" + +#: src/help.c:398 src/help.c:857 +#, c-format +msgid " --archive-timeout=timeout wait timeout for WAL segment archiving (default: 5min)\n" +msgstr "" + +#: src/help.c:400 src/help.c:644 src/help.c:859 src/help.c:1066 +#, c-format +msgid "" +"\n" +" Connection options:\n" +msgstr "" + +#: src/help.c:401 src/help.c:645 src/help.c:860 src/help.c:1067 +#, c-format +msgid " -U, --pguser=USERNAME user name to connect as (default: current local user)\n" +msgstr "" + +#: src/help.c:402 src/help.c:646 src/help.c:861 src/help.c:1068 +#, c-format +msgid " -d, --pgdatabase=DBNAME database to connect (default: username)\n" +msgstr "" + +#: src/help.c:403 src/help.c:647 src/help.c:862 src/help.c:1069 +#, c-format +msgid " -h, --pghost=HOSTNAME database server host or socket directory(default: 'local socket')\n" +msgstr "" + +#: src/help.c:404 src/help.c:648 src/help.c:863 src/help.c:1070 +#, c-format +msgid " -p, --pgport=PORT database server port (default: 5432)\n" +msgstr "" + +#: src/help.c:405 src/help.c:649 src/help.c:1071 +#, c-format +msgid " -w, --no-password never prompt for password\n" +msgstr "" + +#: src/help.c:406 +#, c-format +msgid " -W, --password force password prompt\n" +msgstr "" + +#: src/help.c:408 src/help.c:530 src/help.c:865 src/help.c:917 src/help.c:974 +#: src/help.c:1009 src/help.c:1074 +#, c-format +msgid "" +"\n" +" Remote options:\n" +msgstr "" + +#: src/help.c:409 src/help.c:531 src/help.c:866 src/help.c:918 src/help.c:975 +#: src/help.c:1010 src/help.c:1075 +#, c-format +msgid " --remote-proto=protocol remote protocol to use\n" +msgstr "" + +#: src/help.c:410 src/help.c:532 src/help.c:867 src/help.c:919 src/help.c:976 +#: src/help.c:1011 src/help.c:1076 +#, c-format +msgid " available options: 'ssh', 'none' (default: ssh)\n" +msgstr "" + +#: src/help.c:411 src/help.c:533 src/help.c:868 src/help.c:920 +#, c-format +msgid " --remote-host=destination remote host address or hostname\n" +msgstr "" + +#: src/help.c:412 src/help.c:534 src/help.c:869 src/help.c:921 src/help.c:978 +#: src/help.c:1013 src/help.c:1078 +#, c-format +msgid " --remote-port=port remote host port (default: 22)\n" +msgstr "" + +#: src/help.c:413 src/help.c:535 src/help.c:870 src/help.c:922 src/help.c:979 +#: src/help.c:1014 src/help.c:1079 +#, c-format +msgid " --remote-path=path path to directory with pg_probackup binary on remote host\n" +msgstr "" + +#: src/help.c:414 src/help.c:536 src/help.c:871 src/help.c:923 src/help.c:980 +#: src/help.c:1015 src/help.c:1080 +#, c-format +msgid " (default: current binary path)\n" +msgstr "" + +#: src/help.c:415 src/help.c:537 src/help.c:872 src/help.c:924 src/help.c:981 +#: src/help.c:1016 src/help.c:1081 +#, c-format +msgid " --remote-user=username user name for ssh connection (default: current user)\n" +msgstr "" + +#: src/help.c:416 src/help.c:538 src/help.c:873 src/help.c:925 src/help.c:982 +#: src/help.c:1017 src/help.c:1082 +#, c-format +msgid " --ssh-options=ssh_options additional ssh options (default: none)\n" +msgstr "" + +#: src/help.c:417 src/help.c:539 src/help.c:874 +#, c-format +msgid " (example: --ssh-options='-c cipher_spec -F configfile')\n" +msgstr "" + +#: src/help.c:419 src/help.c:881 +#, c-format +msgid "" +"\n" +" Replica options:\n" +msgstr "" + +#: src/help.c:420 
src/help.c:882 +#, c-format +msgid " --master-user=user_name user name to connect to master (deprecated)\n" +msgstr "" + +#: src/help.c:421 src/help.c:883 +#, c-format +msgid " --master-db=db_name database to connect to master (deprecated)\n" +msgstr "" + +#: src/help.c:422 src/help.c:884 +#, c-format +msgid " --master-host=host_name database server host of master (deprecated)\n" +msgstr "" + +#: src/help.c:423 src/help.c:885 +#, c-format +msgid " --master-port=port database server port of master (deprecated)\n" +msgstr "" + +#: src/help.c:424 src/help.c:886 +#, c-format +msgid "" +" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n" +"\n" +msgstr "" + +#: src/help.c:430 +#, c-format +msgid "" +"\n" +"%s restore -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:432 +#, c-format +msgid " [--progress] [--force] [--no-sync]\n" +msgstr "" + +#: src/help.c:436 +#, c-format +msgid " [--skip-external-dirs]\n" +msgstr "" + +#: src/help.c:438 +#, c-format +msgid " [--db-include dbname | --db-exclude dbname]\n" +msgstr "" + +#: src/help.c:446 +#, c-format +msgid " [-R | --restore-as-replica]\n" +msgstr "" + +#: src/help.c:452 +#, c-format +msgid " [--archive-host=hostname] [--archive-port=port]\n" +msgstr "" + +#: src/help.c:453 +#, c-format +msgid "" +" [--archive-user=username]\n" +"\n" +msgstr "" + +#: src/help.c:459 +#, c-format +msgid " -i, --backup-id=backup-id backup to restore\n" +msgstr "" + +#: src/help.c:463 +#, c-format +msgid " --force ignore invalid status of the restored backup\n" +msgstr "" + +#: src/help.c:464 +#, c-format +msgid " --no-sync do not sync restored files to disk\n" +msgstr "" + +#: src/help.c:465 +#, c-format +msgid " --no-validate disable backup validation during restore\n" +msgstr "" + +#: src/help.c:468 src/help.c:1060 +#, c-format +msgid " -T, --tablespace-mapping=OLDDIR=NEWDIR\n" +msgstr "" + +#: src/help.c:469 src/help.c:1061 +#, c-format +msgid " relocate the tablespace from directory OLDDIR to NEWDIR\n" +msgstr "" + +#: src/help.c:470 +#, c-format +msgid " --external-mapping=OLDDIR=NEWDIR\n" +msgstr "" + +#: src/help.c:471 +#, c-format +msgid " relocate the external directory from OLDDIR to NEWDIR\n" +msgstr "" + +#: src/help.c:472 +#, c-format +msgid " --skip-external-dirs do not restore all external directories\n" +msgstr "" + +#: src/help.c:474 +#, c-format +msgid "" +"\n" +" Incremental restore options:\n" +msgstr "" + +#: src/help.c:475 +#, c-format +msgid " -I, --incremental-mode=none|checksum|lsn\n" +msgstr "" + +#: src/help.c:476 +#, c-format +msgid " reuse valid pages available in PGDATA if they have not changed\n" +msgstr "" + +#: src/help.c:477 +#, c-format +msgid " (default: none)\n" +msgstr "" + +#: src/help.c:479 +#, c-format +msgid "" +"\n" +" Partial restore options:\n" +msgstr "" + +#: src/help.c:480 +#, c-format +msgid " --db-include dbname restore only specified databases\n" +msgstr "" + +#: src/help.c:481 +#, c-format +msgid " --db-exclude dbname do not restore specified databases\n" +msgstr "" + +#: src/help.c:483 +#, c-format +msgid "" +"\n" +" Recovery options:\n" +msgstr "" + +#: src/help.c:484 src/help.c:564 +#, c-format +msgid " --recovery-target-time=time time stamp up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:485 src/help.c:565 +#, c-format +msgid " --recovery-target-xid=xid transaction ID up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:486 src/help.c:566 +#, c-format +msgid " --recovery-target-lsn=lsn LSN of the write-ahead 
log location up to which recovery will proceed\n" +msgstr "" + +#: src/help.c:487 src/help.c:567 +#, c-format +msgid " --recovery-target-inclusive=boolean\n" +msgstr "" + +#: src/help.c:488 src/help.c:568 +#, c-format +msgid " whether we stop just after the recovery target\n" +msgstr "" + +#: src/help.c:489 src/help.c:569 +#, c-format +msgid " --recovery-target-timeline=timeline\n" +msgstr "" + +#: src/help.c:490 src/help.c:570 +#, c-format +msgid " recovering into a particular timeline\n" +msgstr "" + +#: src/help.c:491 +#, c-format +msgid " --recovery-target=immediate|latest\n" +msgstr "" + +#: src/help.c:492 +#, c-format +msgid " end recovery as soon as a consistent state is reached or as late as possible\n" +msgstr "" + +#: src/help.c:493 src/help.c:571 +#, c-format +msgid " --recovery-target-name=target-name\n" +msgstr "" + +#: src/help.c:494 src/help.c:572 +#, c-format +msgid " the named restore point to which recovery will proceed\n" +msgstr "" + +#: src/help.c:495 +#, c-format +msgid " --recovery-target-action=pause|promote|shutdown\n" +msgstr "" + +#: src/help.c:496 +#, c-format +msgid " action the server should take once the recovery target is reached\n" +msgstr "" + +#: src/help.c:497 +#, c-format +msgid " (default: pause)\n" +msgstr "" + +#: src/help.c:498 src/help.c:818 +#, c-format +msgid " --restore-command=cmdline command to use as 'restore_command' in recovery.conf; 'none' disables\n" +msgstr "" + +#: src/help.c:500 +#, c-format +msgid "" +"\n" +" Standby options:\n" +msgstr "" + +#: src/help.c:501 +#, c-format +msgid " -R, --restore-as-replica write a minimal recovery.conf in the output directory\n" +msgstr "" + +#: src/help.c:502 +#, c-format +msgid " to ease setting up a standby server\n" +msgstr "" + +#: src/help.c:503 +#, c-format +msgid " --primary-conninfo=primary_conninfo\n" +msgstr "" + +#: src/help.c:504 +#, c-format +msgid " connection string to be used for establishing connection\n" +msgstr "" + +#: src/help.c:505 +#, c-format +msgid " with the primary server\n" +msgstr "" + +#: src/help.c:506 +#, c-format +msgid " -S, --primary-slot-name=slotname replication slot to be used for WAL streaming from the primary server\n" +msgstr "" + +#: src/help.c:541 src/help.c:876 +#, c-format +msgid "" +"\n" +" Remote WAL archive options:\n" +msgstr "" + +#: src/help.c:542 src/help.c:877 +#, c-format +msgid " --archive-host=destination address or hostname for ssh connection to archive host\n" +msgstr "" + +#: src/help.c:543 src/help.c:878 +#, c-format +msgid " --archive-port=port port for ssh connection to archive host (default: 22)\n" +msgstr "" + +#: src/help.c:544 +#, c-format +msgid "" +" --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n" +"\n" +msgstr "" + +#: src/help.c:550 +#, c-format +msgid "" +"\n" +"%s validate -B backup-path [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:556 +#, c-format +msgid "" +" [--skip-block-validation]\n" +"\n" +msgstr "" + +#: src/help.c:560 +#, c-format +msgid " -i, --backup-id=backup-id backup to validate\n" +msgstr "" + +#: src/help.c:595 src/help.c:722 src/help.c:768 +#, c-format +msgid "" +" --no-color disable the coloring of error and warning console messages\n" +"\n" +msgstr "" + +#: src/help.c:601 +#, c-format +msgid "" +"\n" +"%s checkdb [-B backup-path] [--instance=instance_name]\n" +msgstr "" + +#: src/help.c:602 +#, c-format +msgid " [-D pgdata-path] [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:604 +#, c-format +msgid "" +" [--heapallindexed] 
[--checkunique]\n" +"\n" +msgstr "" + +#: src/help.c:612 +#, c-format +msgid " --skip-block-validation skip file-level checking\n" +msgstr "" + +#: src/help.c:613 src/help.c:618 src/help.c:620 +#, c-format +msgid " can be used only with '--amcheck' option\n" +msgstr "" + +#: src/help.c:614 +#, c-format +msgid " --amcheck in addition to file-level block checking\n" +msgstr "" + +#: src/help.c:615 +#, c-format +msgid " check btree indexes via function 'bt_index_check()'\n" +msgstr "" + +#: src/help.c:616 +#, c-format +msgid " using 'amcheck' or 'amcheck_next' extensions\n" +msgstr "" + +#: src/help.c:617 +#, c-format +msgid " --heapallindexed also check that heap is indexed\n" +msgstr "" + +#: src/help.c:619 +#, c-format +msgid " --checkunique also check unique constraints\n" +msgstr "" + +#: src/help.c:631 +#, c-format +msgid " support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n" +msgstr "" + +#: src/help.c:650 src/help.c:1072 +#, c-format +msgid "" +" -W, --password force password prompt\n" +"\n" +msgstr "" + +#: src/help.c:656 +#, c-format +msgid "" +"\n" +"%s show -B backup-path\n" +msgstr "" + +#: src/help.c:658 +#, c-format +msgid "" +" [--format=format] [--archive]\n" +"\n" +msgstr "" + +#: src/help.c:661 +#, c-format +msgid " --instance=instance_name show info about specific instance\n" +msgstr "" + +#: src/help.c:662 +#, c-format +msgid " -i, --backup-id=backup-id show info about specific backups\n" +msgstr "" + +#: src/help.c:663 +#, c-format +msgid " --archive show WAL archive information\n" +msgstr "" + +#: src/help.c:664 +#, c-format +msgid " --format=format show format=PLAIN|JSON\n" +msgstr "" + +#: src/help.c:665 +#, c-format +msgid "" +" --no-color disable the coloring for plain format\n" +"\n" +msgstr "" + +#: src/help.c:671 +#, c-format +msgid "" +"\n" +"%s delete -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:672 +#, c-format +msgid " [-i backup-id | --delete-expired | --merge-expired] [--delete-wal]\n" +msgstr "" + +#: src/help.c:677 +#, c-format +msgid "" +" [--no-validate] [--no-sync]\n" +"\n" +msgstr "" + +#: src/help.c:681 +#, c-format +msgid " -i, --backup-id=backup-id backup to delete\n" +msgstr "" + +#: src/help.c:684 src/help.c:745 +#, c-format +msgid " --no-validate disable validation during retention merge\n" +msgstr "" + +#: src/help.c:685 src/help.c:746 +#, c-format +msgid " --no-sync do not sync merged files to disk\n" +msgstr "" + +#: src/help.c:689 src/help.c:691 +#, c-format +msgid " retention policy\n" +msgstr "" + +#: src/help.c:700 +#, c-format +msgid " --status=backup_status delete all backups with specified status\n" +msgstr "" + +#: src/help.c:728 +#, c-format +msgid "" +"\n" +"%s merge -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:729 +#, c-format +msgid " -i backup-id [-j num-threads] [--progress]\n" +msgstr "" + +#: src/help.c:737 +#, c-format +msgid "" +" [--log-rotation-age=log-rotation-age]\n" +"\n" +msgstr "" + +#: src/help.c:741 +#, c-format +msgid " -i, --backup-id=backup-id backup to merge\n" +msgstr "" + +#: src/help.c:774 +#, c-format +msgid "" +"\n" +"%s set-backup -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:775 +#, c-format +msgid " -i backup-id\n" +msgstr "" + +#: src/help.c:776 +#, c-format +msgid "" +" [--ttl=interval] [--expire-time=time] [--note=text]\n" +"\n" +msgstr "" + +#: src/help.c:783 +#, c-format +msgid " --note=text add note to backup; 'none' to remove note\n" +msgstr "" + +#: src/help.c:790 +#, c-format +msgid "" +"\n" +"%s 
set-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:810 src/help.c:908 src/help.c:952 src/help.c:996 +#, c-format +msgid "" +" [--ssh-options]\n" +"\n" +msgstr "" + +#: src/help.c:846 +#, c-format +msgid " --wal-depth=wal-depth number of latest valid backups with ability to perform\n" +msgstr "" + +#: src/help.c:847 +#, c-format +msgid " the point in time recovery; disables; (default: 0)\n" +msgstr "" + +#: src/help.c:852 src/help.c:970 +#, c-format +msgid " available options: 'zlib','pglz','none' (default: 'none')\n" +msgstr "" + +#: src/help.c:879 +#, c-format +msgid " --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n" +msgstr "" + +#: src/help.c:892 +#, c-format +msgid "" +"\n" +"%s show-config -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:893 +#, c-format +msgid "" +" [--format=format]\n" +"\n" +msgstr "" + +#: src/help.c:897 +#, c-format +msgid "" +" --format=format show format=PLAIN|JSON\n" +"\n" +msgstr "" + +#: src/help.c:903 +#, c-format +msgid "" +"\n" +"%s add-instance -B backup-path -D pgdata-path\n" +msgstr "" + +#: src/help.c:905 +#, c-format +msgid " [-E external-directory-path]\n" +msgstr "" + +#: src/help.c:912 +#, c-format +msgid " --instance=instance_name name of the new instance\n" +msgstr "" + +#: src/help.c:926 src/help.c:983 src/help.c:1018 src/help.c:1083 +#, c-format +msgid "" +" (example: --ssh-options='-c cipher_spec -F configfile')\n" +"\n" +msgstr "" + +#: src/help.c:932 +#, c-format +msgid "" +"\n" +"%s del-instance -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:935 +#, c-format +msgid "" +" --instance=instance_name name of the instance to delete\n" +"\n" +msgstr "" + +#: src/help.c:941 +#, c-format +msgid "" +"\n" +"%s archive-push -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:955 src/help.c:999 +#, c-format +msgid " --instance=instance_name name of the instance to delete\n" +msgstr "" + +#: src/help.c:956 src/help.c:1002 +#, c-format +msgid " --wal-file-name=wal-file-name\n" +msgstr "" + +#: src/help.c:957 +#, c-format +msgid " name of the file to copy into WAL archive\n" +msgstr "" + +#: src/help.c:958 src/help.c:1000 +#, c-format +msgid " --wal-file-path=wal-file-path\n" +msgstr "" + +#: src/help.c:959 +#, c-format +msgid " relative destination path of the WAL archive\n" +msgstr "" + +#: src/help.c:961 +#, c-format +msgid " --batch-size=NUM number of files to be copied\n" +msgstr "" + +#: src/help.c:962 +#, c-format +msgid " --archive-timeout=timeout wait timeout before discarding stale temp file(default: 5min)\n" +msgstr "" + +#: src/help.c:963 +#, c-format +msgid " --no-ready-rename do not rename '.ready' files in 'archive_status' directory\n" +msgstr "" + +#: src/help.c:964 +#, c-format +msgid " --no-sync do not sync WAL file to disk\n" +msgstr "" + +#: src/help.c:965 +#, c-format +msgid " --overwrite overwrite archived WAL file\n" +msgstr "" + +#: src/help.c:977 src/help.c:1012 src/help.c:1077 +#, c-format +msgid " --remote-host=hostname remote host address or hostname\n" +msgstr "" + +#: src/help.c:989 +#, c-format +msgid "" +"\n" +"%s archive-get -B backup-path --instance=instance_name\n" +msgstr "" + +#: src/help.c:1001 +#, c-format +msgid " relative destination path name of the WAL file on the server\n" +msgstr "" + +#: src/help.c:1003 +#, c-format +msgid " name of the WAL file to retrieve from the archive\n" +msgstr "" + +#: src/help.c:1005 +#, c-format +msgid " --batch-size=NUM number of files to 
be prefetched\n" +msgstr "" + +#: src/help.c:1006 +#, c-format +msgid " --prefetch-dir=path location of the store area for prefetched WAL files\n" +msgstr "" + +#: src/help.c:1007 +#, c-format +msgid " --no-validate-wal skip validation of prefetched WAL file before using it\n" +msgstr "" + +#: src/help.c:1024 +#, c-format +msgid "" +"\n" +"%s help [command]\n" +msgstr "" + +#: src/help.c:1025 +#, c-format +msgid "" +"%s command --help\n" +"\n" +msgstr "" + +#: src/help.c:1031 +#, c-format +msgid "" +"\n" +"%s version\n" +msgstr "" + +#: src/help.c:1032 +#, c-format +msgid "" +"%s --version\n" +"\n" +msgstr "" + +#: src/help.c:1038 +#, c-format +msgid "" +"\n" +"%s catchup -b catchup-mode\n" +msgstr "" + +#: src/help.c:1041 +#, c-format +msgid " [--stream [-S slot-name]] [--temp-slot | --perm-slot]\n" +msgstr "" + +#: src/help.c:1050 +#, c-format +msgid "" +" [--help]\n" +"\n" +msgstr "" + +#: src/help.c:1052 +#, c-format +msgid " -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n" +msgstr "" + +#: src/help.c:1053 +#, c-format +msgid " --stream stream the transaction log (only supported mode)\n" +msgstr "" + +#: src/help.c:1056 +#, c-format +msgid " -P --perm-slot create permanent replication slot\n" +msgstr "" + +#: src/help.c:1062 +#, c-format +msgid " -x, --exclude-path=path_prefix files with path_prefix (relative to pgdata) will be\n" +msgstr "" + +#: src/help.c:1063 +#, c-format +msgid " excluded from catchup (can be used multiple times)\n" +msgstr "" + +#: src/help.c:1064 +#, c-format +msgid " Dangerous option! Use at your own risk!\n" +msgstr "" diff --git a/src/archive.c b/src/archive.c index 0f32d9345..7d753c8b3 100644 --- a/src/archive.c +++ b/src/archive.c @@ -3,7 +3,7 @@ * archive.c: - pg_probackup specific archive commands for archive backups. 
* * - * Portions Copyright (c) 2018-2021, Postgres Professional + * Portions Copyright (c) 2018-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -13,14 +13,6 @@ #include "utils/thread.h" #include "instr_time.h" -static int push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_dir, - const char *archive_dir, bool overwrite, bool no_sync, - uint32 archive_timeout); -#ifdef HAVE_LIBZ -static int push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, - const char *archive_dir, bool overwrite, bool no_sync, - int compress_level, uint32 archive_timeout); -#endif static void *push_files(void *arg); static void *get_files(void *arg); static bool get_wal_file(const char *filename, const char *from_path, const char *to_path, @@ -91,8 +83,19 @@ typedef struct WALSegno { char name[MAXFNAMELEN]; volatile pg_atomic_flag lock; + volatile pg_atomic_uint32 done; + struct WALSegno* prev; } WALSegno; +static int push_file_internal_uncompressed(WALSegno *wal_file_name, const char *pg_xlog_dir, + const char *archive_dir, bool overwrite, bool no_sync, + uint32 archive_timeout); +#ifdef HAVE_LIBZ +static int push_file_internal_gz(WALSegno *wal_file_name, const char *pg_xlog_dir, + const char *archive_dir, bool overwrite, bool no_sync, + int compress_level, uint32 archive_timeout); +#endif + static int push_file(WALSegno *xlogfile, const char *archive_status_dir, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, uint32 archive_timeout, @@ -337,16 +340,18 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, /* If compression is not required, then just copy it as is */ if (!is_compress) - rc = push_file_internal_uncompressed(xlogfile->name, pg_xlog_dir, + rc = push_file_internal_uncompressed(xlogfile, pg_xlog_dir, archive_dir, overwrite, no_sync, archive_timeout); #ifdef HAVE_LIBZ else - rc = push_file_internal_gz(xlogfile->name, pg_xlog_dir, archive_dir, + rc = push_file_internal_gz(xlogfile, pg_xlog_dir, archive_dir, overwrite, no_sync, compress_level, archive_timeout); #endif + pg_atomic_write_u32(&xlogfile->done, 1); + /* take '--no-ready-rename' flag into account */ if (!no_ready_rename && archive_status_dir != NULL) { @@ -361,7 +366,7 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, canonicalize_path(wal_file_ready); canonicalize_path(wal_file_done); /* It is ok to rename status file in archive_status directory */ - elog(VERBOSE, "Rename \"%s\" to \"%s\"", wal_file_ready, wal_file_done); + elog(LOG, "Rename \"%s\" to \"%s\"", wal_file_ready, wal_file_done); /* do not error out, if rename failed */ if (fio_rename(wal_file_ready, wal_file_done, FIO_DB_HOST) < 0) @@ -381,13 +386,14 @@ push_file(WALSegno *xlogfile, const char *archive_status_dir, * has the same checksum */ int -push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_dir, +push_file_internal_uncompressed(WALSegno *wal_file, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, uint32 archive_timeout) { FILE *in = NULL; int out = -1; char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ + const char *wal_file_name = wal_file->name; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; /* partial handling */ @@ -409,7 +415,10 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d /* Open source file for read */ in = fopen(from_fullpath, PG_BINARY_R); if (in == NULL) + { + 
pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open source file \"%s\": %s", from_fullpath, strerror(errno)); + } /* disable stdio buffering for input file */ setvbuf(in, NULL, _IONBF, BUFSIZ); @@ -422,8 +431,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } /* Already existing destination temp file is not an error condition */ } else @@ -453,15 +465,21 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } else /* Successfully created partial file */ break; } else + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot stat temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } /* first round */ @@ -492,8 +510,11 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (out < 0) { if (!partial_is_stale) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", to_fullpath_part, archive_timeout); + } /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_part); @@ -501,19 +522,22 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d out = fio_open(to_fullpath_part, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, FIO_BACKUP_HOST); if (out < 0) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } part_opened: - elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_part); + elog(LOG, "Temp WAL file successfully created: \"%s\"", to_fullpath_part); /* Check if possible to skip copying */ if (fileExists(to_fullpath, FIO_BACKUP_HOST)) { pg_crc32 crc32_src; pg_crc32 crc32_dst; - crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); - crc32_dst = fio_get_crc32(to_fullpath, FIO_BACKUP_HOST, false); + crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false, false); + crc32_dst = fio_get_crc32(to_fullpath, FIO_BACKUP_HOST, false, false); if (crc32_src == crc32_dst) { @@ -536,6 +560,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d * so we must unlink partial file and exit with error. 
*/ fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath); } @@ -553,6 +578,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (ferror(in)) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot read source file \"%s\": %s", from_fullpath, strerror(errno)); } @@ -560,6 +586,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (read_len > 0 && fio_write_async(out, buf, read_len) != read_len) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to destination temp file \"%s\": %s", to_fullpath_part, strerror(errno)); } @@ -575,14 +602,29 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (fio_check_error_fd(out, &errmsg)) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to the remote file \"%s\": %s", to_fullpath_part, errmsg); } + if (wal_file->prev != NULL) + { + while (!pg_atomic_read_u32(&wal_file->prev->done)) + { + if (thread_interrupted || interrupted) + { + pg_atomic_write_u32(&wal_file->done, 1); + elog(ERROR, "Terminated while waiting for prev file"); + } + usleep(250); + } + } + /* close temp file */ if (fio_close(out) != 0) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot close temp WAL file \"%s\": %s", to_fullpath_part, strerror(errno)); } @@ -591,11 +633,14 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (!no_sync) { if (fio_sync(to_fullpath_part, FIO_BACKUP_HOST) != 0) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath_part, strerror(errno)); + } } - elog(VERBOSE, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath); + elog(LOG, "Rename \"%s\" to \"%s\"", to_fullpath_part, to_fullpath); //copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true); @@ -603,6 +648,7 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d if (fio_rename(to_fullpath_part, to_fullpath, FIO_BACKUP_HOST) < 0) { fio_unlink(to_fullpath_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", to_fullpath_part, to_fullpath, strerror(errno)); } @@ -620,13 +666,14 @@ push_file_internal_uncompressed(const char *wal_file_name, const char *pg_xlog_d * has the same checksum */ int -push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, +push_file_internal_gz(WALSegno *wal_file, const char *pg_xlog_dir, const char *archive_dir, bool overwrite, bool no_sync, int compress_level, uint32 archive_timeout) { FILE *in = NULL; gzFile out = NULL; char *buf = pgut_malloc(OUT_BUF_SIZE); + const char *wal_file_name = wal_file->name; char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; char to_fullpath_gz[MAXPGPATH]; @@ -656,8 +703,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, /* Open source file for read */ in = fopen(from_fullpath, PG_BINARY_R); if (in == NULL) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open source WAL file \"%s\": %s", from_fullpath, strerror(errno)); + } /* disable stdio buffering for input file */ setvbuf(in, NULL, 
_IONBF, BUFSIZ); @@ -667,8 +717,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } /* Already existing destination temp file is not an error condition */ } else @@ -698,16 +751,22 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (errno != EEXIST) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } else /* Successfully created partial file */ break; } else + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot stat temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } /* first round */ @@ -738,8 +797,11 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (out == NULL) { if (!partial_is_stale) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to open temp WAL file \"%s\" in %i seconds", to_fullpath_gz_part, archive_timeout); + } /* Partial segment is considered stale, so reuse it */ elog(LOG, "Reusing stale temp WAL file \"%s\"", to_fullpath_gz_part); @@ -747,12 +809,15 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, out = fio_gzopen(to_fullpath_gz_part, PG_BINARY_W, compress_level, FIO_BACKUP_HOST); if (out == NULL) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot open temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } part_opened: - elog(VERBOSE, "Temp WAL file successfully created: \"%s\"", to_fullpath_gz_part); + elog(LOG, "Temp WAL file successfully created: \"%s\"", to_fullpath_gz_part); /* Check if possible to skip copying, */ if (fileExists(to_fullpath_gz, FIO_BACKUP_HOST)) @@ -760,9 +825,8 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, pg_crc32 crc32_src; pg_crc32 crc32_dst; - /* TODO: what if one of them goes missing? */ - crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); - crc32_dst = fio_get_crc32(to_fullpath_gz, FIO_BACKUP_HOST, true); + crc32_src = fio_get_crc32(from_fullpath, FIO_DB_HOST, false, false); + crc32_dst = fio_get_crc32(to_fullpath_gz, FIO_BACKUP_HOST, true, false); if (crc32_src == crc32_dst) { @@ -785,6 +849,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, * so we must unlink partial file and exit with error. 
*/ fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "WAL file already exists in archive with " "different checksum: \"%s\"", to_fullpath_gz); } @@ -802,6 +867,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (ferror(in)) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot read from source file \"%s\": %s", from_fullpath, strerror(errno)); } @@ -809,6 +875,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (read_len > 0 && fio_gzwrite(out, buf, read_len) != read_len) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, get_gz_error(out, errno)); } @@ -824,14 +891,29 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (fio_check_error_fd_gz(out, &errmsg)) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot write to the remote compressed file \"%s\": %s", to_fullpath_gz_part, errmsg); } + if (wal_file->prev != NULL) + { + while (!pg_atomic_read_u32(&wal_file->prev->done)) + { + if (thread_interrupted || interrupted) + { + pg_atomic_write_u32(&wal_file->done, 1); + elog(ERROR, "Terminated while waiting for prev file"); + } + usleep(250); + } + } + /* close temp file, TODO: make it synchronous */ if (fio_gzclose(out) != 0) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot close compressed temp WAL file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); } @@ -840,11 +922,14 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (!no_sync) { if (fio_sync(to_fullpath_gz_part, FIO_BACKUP_HOST) != 0) + { + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Failed to sync file \"%s\": %s", to_fullpath_gz_part, strerror(errno)); + } } - elog(VERBOSE, "Rename \"%s\" to \"%s\"", + elog(LOG, "Rename \"%s\" to \"%s\"", to_fullpath_gz_part, to_fullpath_gz); //copy_file_attributes(from_path, FIO_DB_HOST, to_path_temp, FIO_BACKUP_HOST, true); @@ -853,6 +938,7 @@ push_file_internal_gz(const char *wal_file_name, const char *pg_xlog_dir, if (fio_rename(to_fullpath_gz_part, to_fullpath_gz, FIO_BACKUP_HOST) < 0) { fio_unlink(to_fullpath_gz_part, FIO_BACKUP_HOST); + pg_atomic_write_u32(&wal_file->done, 1); elog(ERROR, "Cannot rename file \"%s\" to \"%s\": %s", to_fullpath_gz_part, to_fullpath_gz, strerror(errno)); } @@ -906,6 +992,15 @@ get_gz_error(gzFile gzf, int errnum) // } //} +static int +walSegnoCompareName(const void *f1, const void *f2) +{ + WALSegno *w1 = *(WALSegno**)f1; + WALSegno *w2 = *(WALSegno**)f2; + + return strcmp(w1->name, w2->name); +} + /* Look for files with '.ready' suffix in archive_status directory * and pack such files into batch sized array. 
*/ @@ -913,14 +1008,15 @@ parray * setup_push_filelist(const char *archive_status_dir, const char *first_file, int batch_size) { - int i; WALSegno *xlogfile = NULL; parray *status_files = NULL; parray *batch_files = parray_new(); + size_t i; /* guarantee that first filename is in batch list */ - xlogfile = palloc(sizeof(WALSegno)); + xlogfile = palloc0(sizeof(WALSegno)); pg_atomic_init_flag(&xlogfile->lock); + pg_atomic_init_u32(&xlogfile->done, 0); snprintf(xlogfile->name, MAXFNAMELEN, "%s", first_file); parray_append(batch_files, xlogfile); @@ -951,8 +1047,9 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, if (strcmp(filename, first_file) == 0) continue; - xlogfile = palloc(sizeof(WALSegno)); + xlogfile = palloc0(sizeof(WALSegno)); pg_atomic_init_flag(&xlogfile->lock); + pg_atomic_init_u32(&xlogfile->done, 0); snprintf(xlogfile->name, MAXFNAMELEN, "%s", filename); parray_append(batch_files, xlogfile); @@ -961,6 +1058,13 @@ setup_push_filelist(const char *archive_status_dir, const char *first_file, break; } + parray_qsort(batch_files, walSegnoCompareName); + for (i = 1; i < parray_num(batch_files); i++) + { + xlogfile = (WALSegno*) parray_get(batch_files, i); + xlogfile->prev = (WALSegno*) parray_get(batch_files, i-1); + } + /* cleanup */ parray_walk(status_files, pgFileFree); parray_free(status_files); @@ -1155,7 +1259,7 @@ do_archive_get(InstanceState *instanceState, InstanceConfig *instance, const cha if (get_wal_file(wal_file_name, backup_wal_file_path, absolute_wal_file_path, false)) { fail_count = 0; - elog(INFO, "pg_probackup archive-get copied WAL file %s", wal_file_name); + elog(LOG, "pg_probackup archive-get copied WAL file %s", wal_file_name); n_fetched++; break; } @@ -1263,6 +1367,7 @@ uint32 run_wal_prefetch(const char *prefetch_dir, const char *archive_dir, arg->thread_num = i+1; arg->files = batch_files; + arg->n_fetched = 0; } /* Run threads */ @@ -1375,11 +1480,11 @@ get_wal_file(const char *filename, const char *from_fullpath, #ifdef HAVE_LIBZ /* If requested file is regular WAL segment, then try to open it with '.gz' suffix... */ if (IsXLogFileName(filename)) - rc = fio_send_file_gz(from_fullpath_gz, to_fullpath, out, &errmsg); + rc = fio_send_file_gz(from_fullpath_gz, out, &errmsg); if (rc == FILE_MISSING) #endif /* ... failing that, use uncompressed */ - rc = fio_send_file(from_fullpath, to_fullpath, out, NULL, &errmsg); + rc = fio_send_file(from_fullpath, out, false, NULL, &errmsg); /* When not in prefetch mode, try to use partial file */ if (rc == FILE_MISSING && !prefetch_mode && IsXLogFileName(filename)) @@ -1389,13 +1494,13 @@ get_wal_file(const char *filename, const char *from_fullpath, #ifdef HAVE_LIBZ /* '.gz.partial' goes first ... */ snprintf(from_partial, sizeof(from_partial), "%s.gz.partial", from_fullpath); - rc = fio_send_file_gz(from_partial, to_fullpath, out, &errmsg); + rc = fio_send_file_gz(from_partial, out, &errmsg); if (rc == FILE_MISSING) #endif { /* ... failing that, use '.partial' */ snprintf(from_partial, sizeof(from_partial), "%s.partial", from_fullpath); - rc = fio_send_file(from_partial, to_fullpath, out, NULL, &errmsg); + rc = fio_send_file(from_partial, out, false, NULL, &errmsg); } if (rc == SEND_OK) @@ -1511,7 +1616,7 @@ get_wal_file_internal(const char *from_path, const char *to_path, FILE *out, char *buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer */ int exit_code = 0; - elog(VERBOSE, "Attempting to %s WAL file '%s'", + elog(LOG, "Attempting to %s WAL file '%s'", is_decompress ? 
"open compressed" : "open", from_path); /* open source file for read */ diff --git a/src/backup.c b/src/backup.c index c575865c4..78c3512e9 100644 --- a/src/backup.c +++ b/src/backup.c @@ -3,7 +3,7 @@ * backup.c: backup DB cluster, archived WAL * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -13,6 +13,9 @@ #if PG_VERSION_NUM < 110000 #include "catalog/catalog.h" #endif +#if PG_VERSION_NUM < 120000 +#include "access/transam.h" +#endif #include "catalog/pg_tablespace.h" #include "pgtar.h" #include "streamutil.h" @@ -65,7 +68,10 @@ static bool pg_is_in_recovery(PGconn *conn); static bool pg_is_superuser(PGconn *conn); static void check_server_version(PGconn *conn, PGNodeInfo *nodeInfo); static void confirm_block_size(PGconn *conn, const char *name, int blcksz); -static void set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); +static void rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i); +static bool remove_excluded_files_criterion(void *value, void *exclude_args); +static void backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments); +static void process_file(int i, pgFile *file, backup_files_arg *arguments); static StopBackupCallbackParams stop_callback_params; @@ -78,7 +84,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) */ if (backup_in_progress) { - elog(WARNING, "backup in progress, stop backup"); + elog(WARNING, "A backup is in progress, stopping it."); /* don't care about stop_lsn in case of error */ pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL); } @@ -116,7 +122,9 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, char pretty_time[20]; char pretty_bytes[20]; - elog(LOG, "Database backup start"); + pgFile *src_pg_control_file = NULL; + + elog(INFO, "Database backup start"); if(current.external_dir_str) { external_dirs = make_external_directory_list(current.external_dir_str, @@ -190,9 +198,9 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. " "pg_probackup do not guarantee to be forward compatible. 
" "Please upgrade pg_probackup binary.", - PROGRAM_VERSION, base36enc(prev_backup->start_time), prev_backup->program_version); + PROGRAM_VERSION, backup_id_of(prev_backup), prev_backup->program_version); - elog(INFO, "Parent backup: %s", base36enc(prev_backup->start_time)); + elog(INFO, "Parent backup: %s", backup_id_of(prev_backup)); /* Files of previous backup needed by DELTA backup */ prev_backup_filelist = get_backup_filelist(prev_backup, true); @@ -233,7 +241,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, "It may indicate that we are trying to backup PostgreSQL instance from the past.", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), (uint32) (prev_backup->start_lsn >> 32), (uint32) (prev_backup->start_lsn), - base36enc(prev_backup->start_time)); + backup_id_of(prev_backup)); /* Update running backup meta with START LSN */ write_backup(¤t, true); @@ -336,11 +344,11 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* Extract information about files in backup_list parsing their names:*/ parse_filelist_filenames(backup_files_list, instance_config.pgdata); - elog(LOG, "Current Start LSN: %X/%X, TLI: %X", + elog(INFO, "Current Start LSN: %X/%X, TLI: %X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn), current.tli); if (current.backup_mode != BACKUP_MODE_FULL) - elog(LOG, "Parent Start LSN: %X/%X, TLI: %X", + elog(INFO, "Parent Start LSN: %X/%X, TLI: %X", (uint32) (prev_backup->start_lsn >> 32), (uint32) (prev_backup->start_lsn), prev_backup->tli); @@ -412,12 +420,30 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, else join_path_components(dirpath, current.database_dir, file->rel_path); - elog(VERBOSE, "Create directory '%s'", dirpath); + elog(LOG, "Create directory '%s'", dirpath); fio_mkdir(dirpath, DIR_PERMISSION, FIO_BACKUP_HOST); } } + /* + * find pg_control file + * We'll copy it last + */ + { + int control_file_elem_index; + pgFile search_key; + MemSet(&search_key, 0, sizeof(pgFile)); + /* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparision */ + search_key.rel_path = XLOG_CONTROL_FILE; + search_key.external_dir_num = 0; + control_file_elem_index = parray_bsearch_index(backup_files_list, &search_key, pgFileCompareRelPathWithExternal); + + if (control_file_elem_index < 0) + elog(ERROR, "File \"%s\" not found in PGDATA %s", XLOG_CONTROL_FILE, current.database_dir); + src_pg_control_file = (pgFile *)parray_get(backup_files_list, control_file_elem_index); + } + /* setup thread locks */ pfilearray_clear_locks(backup_files_list); @@ -477,6 +503,26 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, backup_isok = false; } + /* copy pg_control at very end */ + if (backup_isok) + { + + elog(progress ? 
INFO : LOG, "Progress: Backup file \"%s\"", + src_pg_control_file->rel_path); + + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + join_path_components(from_fullpath, instance_config.pgdata, src_pg_control_file->rel_path); + join_path_components(to_fullpath, current.database_dir, src_pg_control_file->rel_path); + + backup_non_data_file(src_pg_control_file, NULL, + from_fullpath, to_fullpath, + current.backup_mode, current.parent_backup, + true); + } + + + time(&end_time); pretty_time_interval(difftime(end_time, start_time), pretty_time, lengthof(pretty_time)); @@ -504,17 +550,8 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, { pgFile *pg_control = NULL; - for (i = 0; i < parray_num(backup_files_list); i++) - { - pgFile *tmp_file = (pgFile *) parray_get(backup_files_list, i); + pg_control = src_pg_control_file; - if (tmp_file->external_dir_num == 0 && - (strcmp(tmp_file->rel_path, XLOG_CONTROL_FILE) == 0)) - { - pg_control = tmp_file; - break; - } - } if (!pg_control) elog(ERROR, "Failed to find file \"%s\" in backup filelist.", @@ -604,7 +641,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, "It may indicate that we are trying to backup PostgreSQL instance from the past.", (uint32) (current.stop_lsn >> 32), (uint32) (current.stop_lsn), (uint32) (prev_backup->stop_lsn >> 32), (uint32) (prev_backup->stop_lsn), - base36enc(prev_backup->stop_lsn)); + backup_id_of(prev_backup)); /* clean external directories list */ if (external_dirs) @@ -673,7 +710,7 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) nodeInfo->checksum_version = current.checksum_version; if (current.checksum_version) - elog(LOG, "This PostgreSQL instance was initialized with data block checksums. " + elog(INFO, "This PostgreSQL instance was initialized with data block checksums. " "Data block corruption will be detected"); else elog(WARNING, "This PostgreSQL instance was initialized without data block checksums. " @@ -692,15 +729,23 @@ pgdata_basic_setup(ConnectionOptions conn_opt, PGNodeInfo *nodeInfo) /* * Entry point of pg_probackup BACKUP subcommand. 
+ * + * if start_time == INVALID_BACKUP_ID then we can generate backup_id */ int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, - bool no_validate, bool no_sync, bool backup_logs) + bool no_validate, bool no_sync, bool backup_logs, time_t start_time) { PGconn *backup_conn = NULL; PGNodeInfo nodeInfo; + time_t latest_backup_id = INVALID_BACKUP_ID; char pretty_bytes[20]; + if (!instance_config.pgdata) + elog(ERROR, "No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)"); + /* Initialize PGInfonode */ pgNodeInit(&nodeInfo); @@ -709,15 +754,59 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, (pg_strcasecmp(instance_config.external_dir_str, "none") != 0)) current.external_dir_str = instance_config.external_dir_str; - /* Create backup directory and BACKUP_CONTROL_FILE */ - pgBackupCreateDir(&current, instanceState->instance_backup_subdir_path); + /* Find latest backup_id */ + { + parray *backup_list = catalog_get_backup_list(instanceState, INVALID_BACKUP_ID); - if (!instance_config.pgdata) - elog(ERROR, "required parameter not specified: PGDATA " - "(-D, --pgdata)"); + if (parray_num(backup_list) > 0) + latest_backup_id = ((pgBackup *)parray_get(backup_list, 0))->backup_id; + + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + } + + /* Try to pick backup_id and create backup directory with BACKUP_CONTROL_FILE */ + if (start_time != INVALID_BACKUP_ID) + { + /* If the user has already chosen a backup_id for us, then try to use it. */ + if (start_time <= latest_backup_id) + /* don't care about freeing base36enc_dup memory, we exit anyway */ + elog(ERROR, "Can't assign backup_id from requested start_time (%s), " + "this time must be later than backup %s", + base36enc(start_time), base36enc(latest_backup_id)); + + current.backup_id = start_time; + pgBackupInitDir(&current, instanceState->instance_backup_subdir_path); + } + else + { + /* We can generate our own unique backup_id + * Sometimes (when we try to backup twice in one second) + * backup_id will be duplicated -> try more times. */ + int attempts = 10; + + if (time(NULL) < latest_backup_id) + elog(ERROR, "Can't assign backup_id, there is already a backup in the future (%s)", + base36enc(latest_backup_id)); + + do + { + current.backup_id = time(NULL); + pgBackupInitDir(&current, instanceState->instance_backup_subdir_path); + if (current.backup_id == INVALID_BACKUP_ID) + sleep(1); + } + while (current.backup_id == INVALID_BACKUP_ID && attempts-- > 0); + } + + /* If creation of backup dir was unsuccessful, there will be WARNINGS in logs already */ + if (current.backup_id == INVALID_BACKUP_ID) + elog(ERROR, "Can't create backup directory"); /* Update backup status and other metainfo. */ current.status = BACKUP_STATUS_RUNNING; + /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ current.start_time = current.backup_id; strlcpy(current.program_version, PROGRAM_VERSION, @@ -728,13 +817,13 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, elog(INFO, "Backup start, pg_probackup version: %s, instance: %s, backup ID: %s, backup mode: %s, " "wal mode: %s, remote: %s, compress-algorithm: %s, compress-level: %i", - PROGRAM_VERSION, instanceState->instance_name, base36enc(current.backup_id), pgBackupGetBackupMode(&current, false), + PROGRAM_VERSION, instanceState->instance_name, backup_id_of(&current), pgBackupGetBackupMode(&current, false), current.stream ?
"STREAM" : "ARCHIVE", IsSshProtocol() ? "true" : "false", deparse_compress_alg(current.compress_alg), current.compress_level); if (!lock_backup(¤t, true, true)) elog(ERROR, "Cannot lock backup %s directory", - base36enc(current.backup_id)); + backup_id_of(¤t)); write_backup(¤t, true); /* set the error processing function for the backup process */ @@ -749,7 +838,7 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, backup_conn = pgdata_basic_setup(instance_config.conn_opt, &nodeInfo); if (current.from_replica) - elog(INFO, "Backup %s is going to be taken from standby", base36enc(current.backup_id)); + elog(INFO, "Backup %s is going to be taken from standby", backup_id_of(¤t)); /* TODO, print PostgreSQL full version */ //elog(INFO, "PostgreSQL version: %s", nodeInfo.server_version_str); @@ -837,13 +926,13 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, pretty_size(current.data_bytes + current.wal_bytes, pretty_bytes, lengthof(pretty_bytes)); else pretty_size(current.data_bytes, pretty_bytes, lengthof(pretty_bytes)); - elog(INFO, "Backup %s resident size: %s", base36enc(current.start_time), pretty_bytes); + elog(INFO, "Backup %s resident size: %s", backup_id_of(¤t), pretty_bytes); if (current.status == BACKUP_STATUS_OK || current.status == BACKUP_STATUS_DONE) - elog(INFO, "Backup %s completed", base36enc(current.start_time)); + elog(INFO, "Backup %s completed", backup_id_of(¤t)); else - elog(ERROR, "Backup %s failed", base36enc(current.start_time)); + elog(ERROR, "Backup %s failed", backup_id_of(¤t)); /* * After successful backup completion remove backups @@ -879,12 +968,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) if (nodeInfo->server_version < 90500) elog(ERROR, - "server version is %s, must be %s or higher", + "Server version is %s, must be %s or higher", nodeInfo->server_version_str, "9.5"); if (current.from_replica && nodeInfo->server_version < 90600) elog(ERROR, - "server version is %s, must be %s or higher for backup from replica", + "Server version is %s, must be %s or higher for backup from replica", nodeInfo->server_version_str, "9.6"); if (nodeInfo->pgpro_support) @@ -896,10 +985,21 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) */ #ifdef PGPRO_VERSION if (!res) + { /* It seems we connected to PostgreSQL (not Postgres Pro) */ - elog(ERROR, "%s was built with Postgres Pro %s %s, " - "but connection is made with PostgreSQL %s", - PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, nodeInfo->server_version_str); + if(strcmp(PGPRO_EDITION, "1C") != 0) + { + elog(ERROR, "%s was built with Postgres Pro %s %s, " + "but connection is made with PostgreSQL %s", + PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, nodeInfo->server_version_str); + } + /* We have PostgresPro for 1C and connect to PostgreSQL or PostgresPro for 1C + * Check the major version + */ + if (strcmp(nodeInfo->server_version_str, PG_MAJORVERSION) != 0) + elog(ERROR, "%s was built with PostgrePro %s %s, but connection is made with %s", + PROGRAM_NAME, PG_MAJORVERSION, PGPRO_EDITION, nodeInfo->server_version_str); + } else { if (strcmp(nodeInfo->server_version_str, PG_MAJORVERSION) != 0 && @@ -982,7 +1082,7 @@ confirm_block_size(PGconn *conn, const char *name, int blcksz) res = pgut_execute(conn, "SELECT pg_catalog.current_setting($1)", 1, &name); if (PQntuples(res) != 1 || PQnfields(res) != 1) - elog(ERROR, "cannot get %s: %s", name, PQerrorMessage(conn)); + elog(ERROR, "Cannot get %s: %s", name, PQerrorMessage(conn)); block_size = 
strtol(PQgetvalue(res, 0, 0), &endp, 10); if ((endp && *endp) || block_size != blcksz) @@ -1006,20 +1106,22 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, uint32 lsn_lo; params[0] = label; +#if PG_VERSION_NUM >= 150000 + elog(INFO, "wait for pg_backup_start()"); +#else elog(INFO, "wait for pg_start_backup()"); +#endif /* 2nd argument is 'fast'*/ params[1] = smooth ? "false" : "true"; - if (!exclusive_backup) - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2, false)", - 2, - params); - else - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2)", - 2, - params); + res = pgut_execute(conn, +#if PG_VERSION_NUM >= 150000 + "SELECT pg_catalog.pg_backup_start($1, $2)", +#else + "SELECT pg_catalog.pg_start_backup($1, $2, false)", +#endif + 2, + params); /* * Set flag that pg_start_backup() was called. If an error will happen it @@ -1369,7 +1471,7 @@ wait_wal_lsn(const char *wal_segment_dir, XLogRecPtr target_lsn, bool is_start_l } if (!current.stream && is_start_lsn && try_count == 30) - elog(WARNING, "By default pg_probackup assume WAL delivery method to be ARCHIVE. " + elog(WARNING, "By default pg_probackup assumes that WAL delivery method to be ARCHIVE. " "If continuous archiving is not set up, use '--stream' option to make autonomous backup. " "Otherwise check that continuous archiving works correctly."); @@ -1513,7 +1615,7 @@ wait_wal_and_calculate_stop_lsn(const char *xlog_path, XLogRecPtr stop_lsn, pgBa stop_lsn_exists = true; } - elog(LOG, "stop_lsn: %X/%X", + elog(INFO, "stop_lsn: %X/%X", (uint32) (stop_lsn >> 32), (uint32) (stop_lsn)); /* @@ -1585,6 +1687,14 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " labelfile," " spcmapfile" " FROM pg_catalog.pg_stop_backup(false)", + stop_backup_on_master_after15_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " lsn," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_backup_stop(false)", /* * In case of backup from replica >= 9.6 we do not trust minRecPoint * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. @@ -1604,19 +1714,33 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_xlog_replay_location()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)"; + " FROM pg_catalog.pg_stop_backup(false)", + stop_backup_on_replica_after15_query[] = + "SELECT" + " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," + " current_timestamp(0)::timestamptz," + " pg_catalog.pg_last_wal_replay_lsn()," + " labelfile," + " spcmapfile" + " FROM pg_catalog.pg_backup_stop(false)"; const char * const stop_backup_query = is_exclusive ? stop_exlusive_backup_query : - server_version >= 100000 ? + server_version >= 150000 ? (is_started_on_replica ? - stop_backup_on_replica_query : - stop_backup_on_master_query + stop_backup_on_replica_after15_query : + stop_backup_on_master_after15_query ) : - (is_started_on_replica ? - stop_backup_on_replica_before10_query : - stop_backup_on_master_before10_query + (server_version >= 100000 ? + (is_started_on_replica ? + stop_backup_on_replica_query : + stop_backup_on_master_query + ) : + (is_started_on_replica ? 
+ stop_backup_on_replica_before10_query : + stop_backup_on_master_before10_query + ) ); bool sent = false; @@ -1632,7 +1756,11 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica */ sent = pgut_send(conn, stop_backup_query, 0, NULL, WARNING); if (!sent) +#if PG_VERSION_NUM >= 150000 + elog(ERROR, "Failed to send pg_backup_stop query"); +#else elog(ERROR, "Failed to send pg_stop_backup query"); +#endif /* After we have sent pg_stop_backup, we don't need this callback anymore */ pgut_atexit_pop(backup_stopbackup_callback, &stop_callback_params); @@ -1678,7 +1806,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, if (interrupted) { pgut_cancel(conn); - elog(ERROR, "interrupted during waiting for pg_stop_backup"); +#if PG_VERSION_NUM >= 150000 + elog(ERROR, "Interrupted during waiting for pg_backup_stop"); +#else + elog(ERROR, "Interrupted during waiting for pg_stop_backup"); +#endif } if (pg_stop_backup_timeout == 1) @@ -1691,7 +1823,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, if (pg_stop_backup_timeout > timeout) { pgut_cancel(conn); +#if PG_VERSION_NUM >= 150000 + elog(ERROR, "pg_backup_stop doesn't answer in %d seconds, cancel it", timeout); +#else elog(ERROR, "pg_stop_backup doesn't answer in %d seconds, cancel it", timeout); +#endif } } else @@ -1703,7 +1839,11 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* Check successfull execution of pg_stop_backup() */ if (!query_result) +#if PG_VERSION_NUM >= 150000 + elog(ERROR, "pg_backup_stop() failed"); +#else elog(ERROR, "pg_stop_backup() failed"); +#endif else { switch (PQresultStatus(query_result)) @@ -1715,7 +1855,7 @@ pg_stop_backup_consume(PGconn *conn, int server_version, case PGRES_TUPLES_OK: break; default: - elog(ERROR, "query failed: %s query was: %s", + elog(ERROR, "Query failed: %s query was: %s", PQerrorMessage(conn), query_text); } backup_in_progress = false; @@ -1726,13 +1866,13 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* get&check recovery_xid */ if (sscanf(PQgetvalue(query_result, 0, recovery_xid_colno), XID_FMT, &result->snapshot_xid) != 1) elog(ERROR, - "result of txid_snapshot_xmax() is invalid: %s", + "Result of txid_snapshot_xmax() is invalid: %s", PQgetvalue(query_result, 0, recovery_xid_colno)); /* get&check recovery_time */ if (!parse_time(PQgetvalue(query_result, 0, recovery_time_colno), &result->invocation_time, true)) elog(ERROR, - "result of current_timestamp is invalid: %s", + "Result of current_timestamp is invalid: %s", PQgetvalue(query_result, 0, recovery_time_colno)); /* get stop_backup_lsn */ @@ -1790,13 +1930,13 @@ pg_stop_backup_write_file_helper(const char *path, const char *filename, const c join_path_components(full_filename, path, filename); fp = fio_fopen(full_filename, PG_BINARY_W, FIO_BACKUP_HOST); if (fp == NULL) - elog(ERROR, "can't open %s file \"%s\": %s", + elog(ERROR, "Can't open %s file \"%s\": %s", error_msg_filename, full_filename, strerror(errno)); if (fio_fwrite(fp, data, len) != len || fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "can't write %s file \"%s\": %s", + elog(ERROR, "Can't write %s file \"%s\": %s", error_msg_filename, full_filename, strerror(errno)); /* @@ -1835,7 +1975,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb /* Remove it ? 
*/ if (!backup_in_progress) - elog(ERROR, "backup is not in progress"); + elog(ERROR, "Backup is not in progress"); pg_silent_client_messages(pg_startbackup_conn); @@ -1902,7 +2042,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb backup->recovery_xid = stop_backup_result.snapshot_xid; - elog(LOG, "Getting the Recovery Time from WAL"); + elog(INFO, "Getting the Recovery Time from WAL"); /* iterate over WAL from stop_backup lsn to start_backup lsn */ if (!read_recovery_info(xlog_path, backup->tli, @@ -1910,7 +2050,7 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb backup->start_lsn, backup->stop_lsn, &backup->recovery_time)) { - elog(LOG, "Failed to find Recovery Time in WAL, forced to trust current_timestamp"); + elog(INFO, "Failed to find Recovery Time in WAL, forced to trust current_timestamp"); backup->recovery_time = stop_backup_result.invocation_time; } @@ -1933,7 +2073,7 @@ backup_cleanup(bool fatal, void *userdata) if (current.status == BACKUP_STATUS_RUNNING && current.end_time == 0) { elog(WARNING, "Backup %s is running, setting its status to ERROR", - base36enc(current.start_time)); + backup_id_of(¤t)); current.end_time = time(NULL); current.status = BACKUP_STATUS_ERROR; write_backup(¤t, true); @@ -1952,8 +2092,6 @@ static void * backup_files(void *arg) { int i; - char from_fullpath[MAXPGPATH]; - char to_fullpath[MAXPGPATH]; static time_t prev_time; backup_files_arg *arguments = (backup_files_arg *) arg; @@ -1965,11 +2103,17 @@ backup_files(void *arg) for (i = 0; i < n_backup_files_list; i++) { pgFile *file = (pgFile *) parray_get(arguments->files_list, i); - pgFile *prev_file = NULL; /* We have already copied all directories */ if (S_ISDIR(file->mode)) continue; + /* + * Don't copy the pg_control file now, we'll copy it last + */ + if(file->external_dir_num == 0 && pg_strcasecmp(file->rel_path, XLOG_CONTROL_FILE) == 0) + { + continue; + } if (arguments->thread_num == 1) { @@ -1985,100 +2129,179 @@ backup_files(void *arg) } } + if (file->skip_cfs_nested) + continue; + if (!pg_atomic_test_set_flag(&file->lock)) continue; /* check for interrupt */ if (interrupted || thread_interrupted) - elog(ERROR, "interrupted during backup"); + elog(ERROR, "Interrupted during backup"); - if (progress) - elog(INFO, "Progress: (%d/%d). Process file \"%s\"", - i + 1, n_backup_files_list, file->rel_path); - - /* Handle zero sized files */ - if (file->size == 0) - { - file->write_size = 0; - continue; - } + elog(progress ? INFO : LOG, "Progress: (%d/%d). Process file \"%s\"", + i + 1, n_backup_files_list, file->rel_path); - /* construct destination filepath */ - if (file->external_dir_num == 0) + if (file->is_cfs) { - join_path_components(from_fullpath, arguments->from_root, file->rel_path); - join_path_components(to_fullpath, arguments->to_root, file->rel_path); + backup_cfs_segment(i, file, arguments); } else { - char external_dst[MAXPGPATH]; - char *external_path = parray_get(arguments->external_dirs, - file->external_dir_num - 1); + process_file(i, file, arguments); + } + } + + /* ssh connection to longer needed */ + fio_disconnect(); - makeExternalDirPathByNum(external_dst, + /* Data files transferring is successful */ + arguments->ret = 0; + + return NULL; +} + +static void +process_file(int i, pgFile *file, backup_files_arg *arguments) +{ + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; + pgFile *prev_file = NULL; + + elog(progress ? INFO : LOG, "Progress: (%d/%zu). 
Process file \"%s\"", + i + 1, parray_num(arguments->files_list), file->rel_path); + + /* Handle zero sized files */ + if (file->size == 0) + { + file->write_size = 0; + return; + } + + /* construct from_fullpath & to_fullpath */ + if (file->external_dir_num == 0) + { + join_path_components(from_fullpath, arguments->from_root, file->rel_path); + join_path_components(to_fullpath, arguments->to_root, file->rel_path); + } + else + { + char external_dst[MAXPGPATH]; + char *external_path = parray_get(arguments->external_dirs, + file->external_dir_num - 1); + + makeExternalDirPathByNum(external_dst, arguments->external_prefix, file->external_dir_num); - join_path_components(to_fullpath, external_dst, file->rel_path); - join_path_components(from_fullpath, external_path, file->rel_path); - } - - /* Encountered some strange beast */ - if (!S_ISREG(file->mode)) - elog(WARNING, "Unexpected type %d of file \"%s\", skipping", - file->mode, from_fullpath); + join_path_components(to_fullpath, external_dst, file->rel_path); + join_path_components(from_fullpath, external_path, file->rel_path); + } - /* Check that file exist in previous backup */ - if (current.backup_mode != BACKUP_MODE_FULL) - { - pgFile **prev_file_tmp = NULL; - prev_file_tmp = (pgFile **) parray_bsearch(arguments->prev_filelist, - file, pgFileCompareRelPathWithExternal); - if (prev_file_tmp) - { - /* File exists in previous backup */ - file->exists_in_prev = true; - prev_file = *prev_file_tmp; - } - } + /* Encountered some strange beast */ + if (!S_ISREG(file->mode)) + { + elog(WARNING, "Unexpected type %d of file \"%s\", skipping", + file->mode, from_fullpath); + return; + } - /* backup file */ - if (file->is_datafile && !file->is_cfs) - { - backup_data_file(file, from_fullpath, to_fullpath, - arguments->prev_start_lsn, - current.backup_mode, - instance_config.compress_alg, - instance_config.compress_level, - arguments->nodeInfo->checksum_version, - arguments->hdr_map, false); - } - else + /* Check that file exist in previous backup */ + if (current.backup_mode != BACKUP_MODE_FULL) + { + pgFile **prevFileTmp = NULL; + prevFileTmp = (pgFile **) parray_bsearch(arguments->prev_filelist, + file, pgFileCompareRelPathWithExternal); + if (prevFileTmp) { - backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, - current.backup_mode, current.parent_backup, true); + /* File exists in previous backup */ + file->exists_in_prev = true; + prev_file = *prevFileTmp; } + } - if (file->write_size == FILE_NOT_FOUND) - continue; + /* backup file */ + if (file->is_datafile && !file->is_cfs) + { + backup_data_file(file, from_fullpath, to_fullpath, + arguments->prev_start_lsn, + current.backup_mode, + instance_config.compress_alg, + instance_config.compress_level, + arguments->nodeInfo->checksum_version, + arguments->hdr_map, false); + } + else + { + backup_non_data_file(file, prev_file, from_fullpath, to_fullpath, + current.backup_mode, current.parent_backup, true); + } - if (file->write_size == BYTES_INVALID) - { - elog(VERBOSE, "Skipping the unchanged file: \"%s\"", from_fullpath); - continue; - } + if (file->write_size == FILE_NOT_FOUND) + return; - elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes", - from_fullpath, file->write_size); + if (file->write_size == BYTES_INVALID) + { + elog(LOG, "Skipping the unchanged file: \"%s\"", from_fullpath); + return; } - /* ssh connection to longer needed */ - fio_disconnect(); + elog(LOG, "File \"%s\". 
Copied "INT64_FORMAT " bytes", + from_fullpath, file->write_size); - /* Data files transferring is successful */ - arguments->ret = 0; +} - return NULL; +static void +backup_cfs_segment(int i, pgFile *file, backup_files_arg *arguments) { + pgFile *data_file = file; + pgFile *cfm_file = NULL; + pgFile *data_bck_file = NULL; + pgFile *cfm_bck_file = NULL; + + while (data_file->cfs_chain) + { + data_file = data_file->cfs_chain; + if (data_file->forkName == cfm) + cfm_file = data_file; + if (data_file->forkName == cfs_bck) + data_bck_file = data_file; + if (data_file->forkName == cfm_bck) + cfm_bck_file = data_file; + } + data_file = file; + if (data_file->relOid >= FirstNormalObjectId && cfm_file == NULL) + { + elog(ERROR, "'CFS' file '%s' have to have '%s.cfm' companion file", + data_file->rel_path, data_file->name); + } + + elog(LOG, "backup CFS segment %s, data_file=%s, cfm_file=%s, data_bck_file=%s, cfm_bck_file=%s", + data_file->name, data_file->name, cfm_file->name, data_bck_file == NULL? "NULL": data_bck_file->name, cfm_bck_file == NULL? "NULL": cfm_bck_file->name); + + /* storing cfs segment. processing corner case [PBCKP-287] stage 1. + * - when we do have data_bck_file we should skip both data_bck_file and cfm_bck_file if exists. + * they are removed by cfs_recover() during postgres start. + */ + if (data_bck_file) + { + if (cfm_bck_file) + cfm_bck_file->write_size = FILE_NOT_FOUND; + data_bck_file->write_size = FILE_NOT_FOUND; + } + /* else we store cfm_bck_file. processing corner case [PBCKP-287] stage 2. + * - when we do have cfm_bck_file only we should store it. + * it will replace cfm_file after postgres start. + */ + else if (cfm_bck_file) + process_file(i, cfm_bck_file, arguments); + + /* storing cfs segment in order cfm_file -> datafile to guarantee their consistency */ + /* cfm_file could be NULL for system tables. But we don't clear is_cfs flag + * for compatibility with older pg_probackup. 
*/ + if (cfm_file) + process_file(i, cfm_file, arguments); + process_file(i, data_file, arguments); + elog(LOG, "Backup CFS segment %s done", data_file->name); } /* @@ -2108,11 +2331,12 @@ parse_filelist_filenames(parray *files, const char *root) */ if (strcmp(file->name, "pg_compression") == 0) { + /* processing potential cfs tablespace */ Oid tblspcOid; Oid dbOid; char tmp_rel_path[MAXPGPATH]; /* - * Check that the file is located under + * Check that pg_compression is located under * TABLESPACE_VERSION_DIRECTORY */ sscanf_result = sscanf(file->rel_path, PG_TBLSPC_DIR "/%u/%s/%u", @@ -2121,8 +2345,10 @@ parse_filelist_filenames(parray *files, const char *root) /* Yes, it is */ if (sscanf_result == 2 && strncmp(tmp_rel_path, TABLESPACE_VERSION_DIRECTORY, - strlen(TABLESPACE_VERSION_DIRECTORY)) == 0) - set_cfs_datafiles(files, root, file->rel_path, i); + strlen(TABLESPACE_VERSION_DIRECTORY)) == 0) { + /* rewind index to the beginning of cfs tablespace */ + rewind_and_mark_cfs_datafiles(files, root, file->rel_path, i); + } } } @@ -2137,7 +2363,7 @@ parse_filelist_filenames(parray *files, const char *root) */ int unlogged_file_num = i - 1; pgFile *unlogged_file = (pgFile *) parray_get(files, - unlogged_file_num); + unlogged_file_num); unlogged_file_reloid = file->relOid; @@ -2145,11 +2371,10 @@ parse_filelist_filenames(parray *files, const char *root) (unlogged_file_reloid != 0) && (unlogged_file->relOid == unlogged_file_reloid)) { - pgFileFree(unlogged_file); - parray_remove(files, unlogged_file_num); + /* flagged to remove from list on stage 2 */ + unlogged_file->remove_from_list = true; unlogged_file_num--; - i--; unlogged_file = (pgFile *) parray_get(files, unlogged_file_num); @@ -2159,6 +2384,22 @@ parse_filelist_filenames(parray *files, const char *root) i++; } + + /* stage 2. 
clean up from temporary tables */ + parray_remove_if(files, remove_excluded_files_criterion, NULL, pgFileFree); +} + +static bool +remove_excluded_files_criterion(void *value, void *exclude_args) { + pgFile *file = (pgFile*)value; + return file->remove_from_list; +} + +static uint32 +hash_rel_seg(pgFile* file) +{ + uint32 hash = hash_mix32_2(file->relOid, file->segno); + return hash_mix32_2(hash, 0xcf5); } /* If file is equal to pg_compression, then we consider this tablespace as @@ -2172,43 +2413,95 @@ parse_filelist_filenames(parray *files, const char *root) * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid/1 * tblspcOid/TABLESPACE_VERSION_DIRECTORY/dboid/1.cfm * tblspcOid/TABLESPACE_VERSION_DIRECTORY/pg_compression + * + * @returns index of first tablespace entry, i.e tblspcOid/TABLESPACE_VERSION_DIRECTORY */ static void -set_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) +rewind_and_mark_cfs_datafiles(parray *files, const char *root, char *relative, size_t i) { int len; int p; + int j; pgFile *prev_file; + pgFile *tmp_file; char *cfs_tblspc_path; + uint32 h; + + /* hash table for cfm files */ +#define HASHN 128 + parray *hashtab[HASHN] = {NULL}; + parray *bucket; + for (p = 0; p < HASHN; p++) + hashtab[p] = parray_new(); + cfs_tblspc_path = strdup(relative); if(!cfs_tblspc_path) elog(ERROR, "Out of memory"); len = strlen("/pg_compression"); cfs_tblspc_path[strlen(cfs_tblspc_path) - len] = 0; - elog(VERBOSE, "CFS DIRECTORY %s, pg_compression path: %s", cfs_tblspc_path, relative); + elog(LOG, "CFS DIRECTORY %s, pg_compression path: %s", cfs_tblspc_path, relative); for (p = (int) i; p >= 0; p--) { prev_file = (pgFile *) parray_get(files, (size_t) p); - elog(VERBOSE, "Checking file in cfs tablespace %s", prev_file->rel_path); + elog(LOG, "Checking file in cfs tablespace %s", prev_file->rel_path); - if (strstr(prev_file->rel_path, cfs_tblspc_path) != NULL) + if (strstr(prev_file->rel_path, cfs_tblspc_path) == NULL) { - if (S_ISREG(prev_file->mode) && prev_file->is_datafile) + elog(LOG, "Breaking on %s", prev_file->rel_path); + break; + } + + if (!S_ISREG(prev_file->mode)) + continue; + + h = hash_rel_seg(prev_file); + bucket = hashtab[h % HASHN]; + + if (prev_file->forkName == cfm || prev_file->forkName == cfm_bck || + prev_file->forkName == cfs_bck) + { + prev_file->skip_cfs_nested = true; + parray_append(bucket, prev_file); + } + else if (prev_file->is_datafile && prev_file->forkName == none) + { + elog(LOG, "Processing 'cfs' file %s", prev_file->rel_path); + /* have to mark as is_cfs even for system-tables for compatibility + * with older pg_probackup */ + prev_file->is_cfs = true; + prev_file->cfs_chain = NULL; + for (j = 0; j < parray_num(bucket); j++) { - elog(VERBOSE, "Setting 'is_cfs' on file %s, name %s", - prev_file->rel_path, prev_file->name); - prev_file->is_cfs = true; + tmp_file = parray_get(bucket, j); + elog(LOG, "Linking 'cfs' file '%s' to '%s'", + tmp_file->rel_path, prev_file->rel_path); + if (tmp_file->relOid == prev_file->relOid && + tmp_file->segno == prev_file->segno) + { + tmp_file->cfs_chain = prev_file->cfs_chain; + prev_file->cfs_chain = tmp_file; + parray_remove(bucket, j); + j--; + } } } - else + } + + for (p = 0; p < HASHN; p++) + { + bucket = hashtab[p]; + for (j = 0; j < parray_num(bucket); j++) { - elog(VERBOSE, "Breaking on %s", prev_file->rel_path); - break; + tmp_file = parray_get(bucket, j); + elog(WARNING, "Orphaned cfs related file '%s'", tmp_file->rel_path); } + parray_free(bucket); + hashtab[p] = NULL; } +#undef HASHN 
free(cfs_tblspc_path); } diff --git a/src/catalog.c b/src/catalog.c index b4ed8c189..b29090789 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -23,7 +23,7 @@ static pgBackup* get_closest_backup(timelineInfo *tlinfo); static pgBackup* get_oldest_backup(timelineInfo *tlinfo); static const char *backupModes[] = {"", "PAGE", "PTRACK", "DELTA", "FULL"}; static pgBackup *readBackupControlFile(const char *path); -static time_t create_backup_dir(pgBackup *backup, const char *backup_instance_path); +static int create_backup_dir(pgBackup *backup, const char *backup_instance_path); static bool backup_lock_exit_hook_registered = false; static parray *locks = NULL; @@ -153,7 +153,7 @@ write_backup_status(pgBackup *backup, BackupStatus status, /* lock backup in exclusive mode */ if (!lock_backup(tmp, strict, true)) - elog(ERROR, "Cannot lock backup %s directory", base36enc(backup->start_time)); + elog(ERROR, "Cannot lock backup %s directory", backup_id_of(backup)); write_backup(tmp, strict); @@ -193,7 +193,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) join_path_components(lock_file, backup->root_dir, BACKUP_LOCK_FILE); - rc = grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict); + rc = grab_excl_lock_file(backup->root_dir, backup_id_of(backup), strict); if (rc == LOCK_FAIL_TIMEOUT) return false; @@ -258,7 +258,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) * freed some space on filesystem, thanks to unlinking of BACKUP_RO_LOCK_FILE. * If somebody concurrently acquired exclusive lock file first, then we should give up. */ - if (grab_excl_lock_file(backup->root_dir, base36enc(backup->start_time), strict) == LOCK_FAIL_TIMEOUT) + if (grab_excl_lock_file(backup->root_dir, backup_id_of(backup), strict) == LOCK_FAIL_TIMEOUT) return false; return true; @@ -275,7 +275,7 @@ lock_backup(pgBackup *backup, bool strict, bool exclusive) /* save lock metadata for later unlocking */ lock = pgut_malloc(sizeof(LockInfo)); - snprintf(lock->backup_id, 10, "%s", base36enc(backup->backup_id)); + snprintf(lock->backup_id, 10, "%s", backup_id_of(backup)); snprintf(lock->backup_dir, MAXPGPATH, "%s", backup->root_dir); lock->exclusive = exclusive; @@ -521,7 +521,7 @@ grab_excl_lock_file(const char *root_dir, const char *backup_id, bool strict) } // elog(LOG, "Acquired exclusive lock for backup %s after %ds", -// base36enc(backup->start_time), +// backup_id_of(backup), // LOCK_TIMEOUT - ntries + LOCK_STALE_TIMEOUT - empty_tries); return LOCK_OK; @@ -561,7 +561,7 @@ wait_shared_owners(pgBackup *backup) { if (interrupted) elog(ERROR, "Interrupted while locking backup %s", - base36enc(backup->start_time)); + backup_id_of(backup)); if (encoded_pid == my_pid) break; @@ -573,10 +573,10 @@ wait_shared_owners(pgBackup *backup) if ((ntries % LOG_FREQ) == 0) { elog(WARNING, "Process %d is using backup %s in shared mode, and is still running", - encoded_pid, base36enc(backup->start_time)); + encoded_pid, backup_id_of(backup)); elog(WARNING, "Waiting %u seconds on lock for backup %s", ntries, - base36enc(backup->start_time)); + backup_id_of(backup)); } sleep(1); @@ -604,7 +604,7 @@ wait_shared_owners(pgBackup *backup) if (ntries <= 0) { elog(WARNING, "Cannot to lock backup %s in exclusive mode, because process %u owns shared lock", - base36enc(backup->start_time), encoded_pid); + backup_id_of(backup), encoded_pid); return 1; } @@ -891,7 +891,7 @@ catalog_get_instance_list(CatalogState *catalogState) instanceState = pgut_new(InstanceState); - strncpy(instanceState->instance_name, 
dent->d_name, MAXPGPATH); + strlcpy(instanceState->instance_name, dent->d_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, catalogState->backup_subdir_path, instanceState->instance_name); join_path_components(instanceState->instance_wal_subdir_path, @@ -963,14 +963,18 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id if (!backup) { - backup = pgut_new(pgBackup); + backup = pgut_new0(pgBackup); pgBackupInit(backup); backup->start_time = base36dec(data_ent->d_name); + /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ + Assert(backup->backup_id == 0 || backup->backup_id == backup->start_time); + backup->backup_id = backup->start_time; } - else if (strcmp(base36enc(backup->start_time), data_ent->d_name) != 0) + else if (strcmp(backup_id_of(backup), data_ent->d_name) != 0) { + /* TODO there is no such guarantees */ elog(WARNING, "backup ID in control file \"%s\" doesn't match name of the backup folder \"%s\"", - base36enc(backup->start_time), backup_conf_path); + backup_id_of(backup), backup_conf_path); } backup->root_dir = pgut_strdup(data_path); @@ -982,7 +986,6 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id init_header_map(backup); /* TODO: save encoded backup id */ - backup->backup_id = backup->start_time; if (requested_backup_id != INVALID_BACKUP_ID && requested_backup_id != backup->start_time) { @@ -1009,7 +1012,7 @@ catalog_get_backup_list(InstanceState *instanceState, time_t requested_backup_id { pgBackup *curr = parray_get(backups, i); pgBackup **ancestor; - pgBackup key; + pgBackup key = {0}; if (curr->backup_mode == BACKUP_MODE_FULL) continue; @@ -1052,7 +1055,7 @@ get_backup_filelist(pgBackup *backup, bool strict) fp = fio_open_stream(backup_filelist_path, FIO_BACKUP_HOST); if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", backup_filelist_path, strerror(errno)); /* enable stdio buffering for local file */ if (!fio_is_remote(FIO_BACKUP_HOST)) @@ -1068,6 +1071,7 @@ get_backup_filelist(pgBackup *backup, bool strict) char linked[MAXPGPATH]; char compress_alg_string[MAXPGPATH]; int64 write_size, + uncompressed_size, mode, /* bit length of mode_t depends on platforms */ is_datafile, is_cfs, @@ -1084,15 +1088,15 @@ get_backup_filelist(pgBackup *backup, bool strict) COMP_FILE_CRC32(true, content_crc, buf, strlen(buf)); - get_control_value(buf, "path", path, NULL, true); - get_control_value(buf, "size", NULL, &write_size, true); - get_control_value(buf, "mode", NULL, &mode, true); - get_control_value(buf, "is_datafile", NULL, &is_datafile, true); - get_control_value(buf, "is_cfs", NULL, &is_cfs, false); - get_control_value(buf, "crc", NULL, &crc, true); - get_control_value(buf, "compress_alg", compress_alg_string, NULL, false); - get_control_value(buf, "external_dir_num", NULL, &external_dir_num, false); - get_control_value(buf, "dbOid", NULL, &dbOid, false); + get_control_value_str(buf, "path", path, sizeof(path),true); + get_control_value_int64(buf, "size", &write_size, true); + get_control_value_int64(buf, "mode", &mode, true); + get_control_value_int64(buf, "is_datafile", &is_datafile, true); + get_control_value_int64(buf, "is_cfs", &is_cfs, false); + get_control_value_int64(buf, "crc", &crc, true); + get_control_value_str(buf, "compress_alg", compress_alg_string, sizeof(compress_alg_string), false); + get_control_value_int64(buf, "external_dir_num", &external_dir_num, false); + 
get_control_value_int64(buf, "dbOid", &dbOid, false); file = pgFileInit(path); file->write_size = (int64) write_size; @@ -1107,30 +1111,58 @@ get_backup_filelist(pgBackup *backup, bool strict) /* * Optional fields */ - if (get_control_value(buf, "linked", linked, NULL, false) && linked[0]) + if (get_control_value_str(buf, "linked", linked, sizeof(linked), false) && linked[0]) { file->linked = pgut_strdup(linked); canonicalize_path(file->linked); } - if (get_control_value(buf, "segno", NULL, &segno, false)) + if (get_control_value_int64(buf, "segno", &segno, false)) file->segno = (int) segno; - if (get_control_value(buf, "n_blocks", NULL, &n_blocks, false)) + if (get_control_value_int64(buf, "n_blocks", &n_blocks, false)) file->n_blocks = (int) n_blocks; - if (get_control_value(buf, "n_headers", NULL, &n_headers, false)) + if (get_control_value_int64(buf, "n_headers", &n_headers, false)) file->n_headers = (int) n_headers; - if (get_control_value(buf, "hdr_crc", NULL, &hdr_crc, false)) + if (get_control_value_int64(buf, "hdr_crc", &hdr_crc, false)) file->hdr_crc = (pg_crc32) hdr_crc; - if (get_control_value(buf, "hdr_off", NULL, &hdr_off, false)) + if (get_control_value_int64(buf, "hdr_off", &hdr_off, false)) file->hdr_off = hdr_off; - if (get_control_value(buf, "hdr_size", NULL, &hdr_size, false)) + if (get_control_value_int64(buf, "hdr_size", &hdr_size, false)) file->hdr_size = (int) hdr_size; + if (get_control_value_int64(buf, "full_size", &uncompressed_size, false)) + file->uncompressed_size = uncompressed_size; + else + file->uncompressed_size = write_size; + if (!file->is_datafile || file->is_cfs) + file->size = file->uncompressed_size; + + if (file->external_dir_num == 0 && + (file->dbOid != 0 || + path_is_prefix_of_path("global", file->rel_path)) && + S_ISREG(file->mode)) + { + bool is_datafile = file->is_datafile; + set_forkname(file); + if (is_datafile != file->is_datafile) + { + if (is_datafile) + elog(WARNING, "File '%s' was stored as datafile, but looks like it is not", + file->rel_path); + else + elog(WARNING, "File '%s' was stored as non-datafile, but looks like it is", + file->rel_path); + /* Lets fail in tests */ + Assert(file->is_datafile == file->is_datafile); + file->is_datafile = is_datafile; + } + } + parray_append(files, file); } @@ -1153,7 +1185,7 @@ get_backup_filelist(pgBackup *backup, bool strict) /* redundant sanity? */ if (!files) - elog(strict ? ERROR : WARNING, "Failed to get file list for backup %s", base36enc(backup->start_time)); + elog(strict ? 
ERROR : WARNING, "Failed to get file list for backup %s", backup_id_of(backup)); return files; } @@ -1179,7 +1211,7 @@ catalog_lock_backup_list(parray *backup_list, int from_idx, int to_idx, bool str pgBackup *backup = (pgBackup *) parray_get(backup_list, i); if (!lock_backup(backup, strict, exclusive)) elog(ERROR, "Cannot lock backup %s directory", - base36enc(backup->start_time)); + backup_id_of(backup)); } } @@ -1192,7 +1224,6 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current int i; pgBackup *full_backup = NULL; pgBackup *tmp_backup = NULL; - char *invalid_backup_id; /* backup_list is sorted in order of descending ID */ for (i = 0; i < parray_num(backup_list); i++) @@ -1213,7 +1244,7 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current return NULL; elog(LOG, "Latest valid FULL backup: %s", - base36enc(full_backup->start_time)); + backup_id_of(full_backup)); /* FULL backup is found, lets find his latest child */ for (i = 0; i < parray_num(backup_list); i++) @@ -1228,20 +1259,14 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current { /* broken chain */ case ChainIsBroken: - invalid_backup_id = base36enc_dup(tmp_backup->parent_backup); - elog(WARNING, "Backup %s has missing parent: %s. Cannot be a parent", - base36enc(backup->start_time), invalid_backup_id); - pg_free(invalid_backup_id); + backup_id_of(backup), base36enc(tmp_backup->parent_backup)); continue; /* chain is intact, but at least one parent is invalid */ case ChainIsInvalid: - invalid_backup_id = base36enc_dup(tmp_backup->start_time); - elog(WARNING, "Backup %s has invalid parent: %s. Cannot be a parent", - base36enc(backup->start_time), invalid_backup_id); - pg_free(invalid_backup_id); + backup_id_of(backup), backup_id_of(tmp_backup)); continue; /* chain is ok */ @@ -1260,7 +1285,7 @@ catalog_get_last_data_backup(parray *backup_list, TimeLineID tli, time_t current else { elog(WARNING, "Backup %s has status: %s. Cannot be a parent.", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); } } @@ -1346,7 +1371,7 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, return NULL; else elog(LOG, "Latest valid full backup: %s, tli: %i", - base36enc(ancestor_backup->start_time), ancestor_backup->tli); + backup_id_of(ancestor_backup), ancestor_backup->tli); /* At this point we found suitable full backup, * now we must find his latest child, suitable to be @@ -1411,20 +1436,34 @@ get_multi_timeline_parent(parray *backup_list, parray *tli_list, return NULL; } -/* Create backup directory in $BACKUP_PATH - * Note, that backup_id attribute is updated, - * so it is possible to get diffrent values in +/* + * Create backup directory in $BACKUP_PATH + * (with proposed backup->backup_id) + * and initialize this directory. + * If creation of directory fails, then + * backup_id will be cleared (set to INVALID_BACKUP_ID). + * It is possible to get diffrent values in * pgBackup.start_time and pgBackup.backup_id. * It may be ok or maybe not, so it's up to the caller * to fix it or let it be. 
*/ void -pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) +pgBackupInitDir(pgBackup *backup, const char *backup_instance_path) { - int i; - parray *subdirs = parray_new(); + int i; + char temp[MAXPGPATH]; + parray *subdirs; + + /* Try to create backup directory at first */ + if (create_backup_dir(backup, backup_instance_path) != 0) + { + /* Clear backup_id as indication of error */ + reset_backup_id(backup); + return; + } + subdirs = parray_new(); parray_append(subdirs, pg_strdup(DATABASE_DIR)); /* Add external dirs containers */ @@ -1436,7 +1475,6 @@ pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) false); for (i = 0; i < parray_num(external_list); i++) { - char temp[MAXPGPATH]; /* Numeration of externaldirs starts with 1 */ makeExternalDirPathByNum(temp, EXTERNAL_DIR, i+1); parray_append(subdirs, pg_strdup(temp)); @@ -1444,11 +1482,6 @@ pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) free_dir_list(external_list); } - backup->backup_id = create_backup_dir(backup, backup_instance_path); - - if (backup->backup_id == 0) - elog(ERROR, "Cannot create backup directory: %s", strerror(errno)); - backup->database_dir = pgut_malloc(MAXPGPATH); join_path_components(backup->database_dir, backup->root_dir, DATABASE_DIR); @@ -1458,10 +1491,8 @@ pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) /* create directories for actual backup files */ for (i = 0; i < parray_num(subdirs); i++) { - char path[MAXPGPATH]; - - join_path_components(path, backup->root_dir, parray_get(subdirs, i)); - fio_mkdir(path, DIR_PERMISSION, FIO_BACKUP_HOST); + join_path_components(temp, backup->root_dir, parray_get(subdirs, i)); + fio_mkdir(temp, DIR_PERMISSION, FIO_BACKUP_HOST); } free_dir_list(subdirs); @@ -1470,36 +1501,26 @@ pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path) /* * Create root directory for backup, * update pgBackup.root_dir if directory creation was a success + * Return values (same as dir_create_dir()): + * 0 - ok + * -1 - error (warning message already emitted) */ -time_t +int create_backup_dir(pgBackup *backup, const char *backup_instance_path) { - int attempts = 10; + int rc; + char path[MAXPGPATH]; - while (attempts--) - { - int rc; - char path[MAXPGPATH]; - time_t backup_id = time(NULL); + join_path_components(path, backup_instance_path, backup_id_of(backup)); - join_path_components(path, backup_instance_path, base36enc(backup_id)); + /* TODO: add wrapper for remote mode */ + rc = dir_create_dir(path, DIR_PERMISSION, true); - /* TODO: add wrapper for remote mode */ - rc = dir_create_dir(path, DIR_PERMISSION, true); - - if (rc == 0) - { - backup->root_dir = pgut_strdup(path); - return backup_id; - } - else - { - elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno)); - sleep(1); - } - } - - return 0; + if (rc == 0) + backup->root_dir = pgut_strdup(path); + else + elog(WARNING, "Cannot create directory \"%s\": %s", path, strerror(errno)); + return rc; } /* @@ -1605,7 +1626,8 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) } /* temp WAL segment */ else if (IsTempXLogFileName(file->name) || - IsTempCompressXLogFileName(file->name)) + IsTempCompressXLogFileName(file->name) || + IsTempPartialXLogFileName(file->name)) { elog(VERBOSE, "temp WAL file \"%s\"", file->name); @@ -1855,7 +1877,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) { elog(LOG, "Pinned backup %s is ignored for the " "purpose of WAL retention", - 
base36enc(backup->start_time)); + backup_id_of(backup)); continue; } @@ -2041,7 +2063,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) elog(LOG, "Archive backup %s to stay consistent " "protect from purge WAL interval " "between %s and %s on timeline %i", - base36enc(backup->start_time), + backup_id_of(backup), begin_segno_str, end_segno_str, backup->tli); if (tlinfo->keep_segments == NULL) @@ -2223,6 +2245,12 @@ do_set_backup(InstanceState *instanceState, time_t backup_id, if (set_backup_params->note) add_note(target_backup, set_backup_params->note); + /* Cleanup */ + if (backup_list) + { + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + } } /* @@ -2236,7 +2264,7 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params) /* sanity, backup must have positive recovery-time */ if (target_backup->recovery_time <= 0) elog(ERROR, "Failed to set 'expire-time' for backup %s: invalid 'recovery-time'", - base36enc(target_backup->backup_id)); + backup_id_of(target_backup)); /* Pin comes from ttl */ if (set_backup_params->ttl > 0) @@ -2250,7 +2278,7 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params) if (target_backup->expire_time == 0) { elog(WARNING, "Backup %s is not pinned, nothing to unpin", - base36enc(target_backup->start_time)); + backup_id_of(target_backup)); return; } target_backup->expire_time = 0; @@ -2270,11 +2298,11 @@ pin_backup(pgBackup *target_backup, pgSetBackupParams *set_backup_params) char expire_timestamp[100]; time2iso(expire_timestamp, lengthof(expire_timestamp), target_backup->expire_time, false); - elog(INFO, "Backup %s is pinned until '%s'", base36enc(target_backup->start_time), + elog(INFO, "Backup %s is pinned until '%s'", backup_id_of(target_backup), expire_timestamp); } else - elog(INFO, "Backup %s is unpinned", base36enc(target_backup->start_time)); + elog(INFO, "Backup %s is unpinned", backup_id_of(target_backup)); return; } @@ -2288,13 +2316,14 @@ add_note(pgBackup *target_backup, char *note) { char *note_string; + char *p; /* unset note */ if (pg_strcasecmp(note, "none") == 0) { target_backup->note = NULL; elog(INFO, "Removing note from backup %s", - base36enc(target_backup->start_time)); + backup_id_of(target_backup)); } else { @@ -2304,12 +2333,12 @@ add_note(pgBackup *target_backup, char *note) * we save only "aaa" * Example: tests.set_backup.SetBackupTest.test_add_note_newlines */ - note_string = pgut_malloc(MAX_NOTE_SIZE); - sscanf(note, "%[^\n]", note_string); + p = strchr(note, '\n'); + note_string = pgut_strndup(note, p ? 
(p-note) : MAX_NOTE_SIZE); target_backup->note = note_string; elog(INFO, "Adding note to backup %s: '%s'", - base36enc(target_backup->start_time), target_backup->note); + backup_id_of(target_backup), target_backup->note); } /* Update backup.control */ @@ -2491,7 +2520,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, char control_path[MAXPGPATH]; char control_path_temp[MAXPGPATH]; size_t i = 0; - #define BUFFERSZ 1024*1024 + #define BUFFERSZ (1024*1024) char *buf; int64 backup_size_on_disk = 0; int64 uncompressed_size_on_disk = 0; @@ -2561,6 +2590,11 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, file->external_dir_num, file->dbOid); + if (file->uncompressed_size != 0 && + file->uncompressed_size != file->write_size) + len += sprintf(line+len, ",\"full_size\":\"" INT64_FORMAT "\"", + file->uncompressed_size); + if (file->is_datafile) len += sprintf(line+len, ",\"segno\":\"%d\"", file->segno); @@ -2623,7 +2657,7 @@ write_backup_filelist(pgBackup *backup, parray *files, const char *root, static pgBackup * readBackupControlFile(const char *path) { - pgBackup *backup = pgut_new(pgBackup); + pgBackup *backup = pgut_new0(pgBackup); char *backup_mode = NULL; char *start_lsn = NULL; char *stop_lsn = NULL; @@ -2693,6 +2727,9 @@ readBackupControlFile(const char *path) pgBackupFree(backup); return NULL; } + /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ + Assert(backup->backup_id == 0 || backup->backup_id == backup->start_time); + backup->backup_id = backup->start_time; if (backup_mode) { @@ -2804,7 +2841,7 @@ parse_backup_mode(const char *value) return BACKUP_MODE_DIFF_DELTA; /* Backup mode is invalid, so leave with an error */ - elog(ERROR, "invalid backup-mode \"%s\"", value); + elog(ERROR, "Invalid backup-mode \"%s\"", value); return BACKUP_MODE_INVALID; } @@ -2839,7 +2876,7 @@ parse_compress_alg(const char *arg) len = strlen(arg); if (len == 0) - elog(ERROR, "compress algorithm is empty"); + elog(ERROR, "Compress algorithm is empty"); if (pg_strncasecmp("zlib", arg, len) == 0) return ZLIB_COMPRESS; @@ -2848,7 +2885,7 @@ parse_compress_alg(const char *arg) else if (pg_strncasecmp("none", arg, len) == 0) return NONE_COMPRESS; else - elog(ERROR, "invalid compress algorithm value \"%s\"", arg); + elog(ERROR, "Invalid compress algorithm value \"%s\"", arg); return NOT_DEFINED_COMPRESS; } @@ -3026,7 +3063,7 @@ find_parent_full_backup(pgBackup *current_backup) base36enc(base_full_backup->parent_backup)); else elog(WARNING, "Failed to find parent FULL backup for %s", - base36enc(current_backup->start_time)); + backup_id_of(current_backup)); return NULL; } diff --git a/src/catchup.c b/src/catchup.c index 1b8f8084d..00752b194 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -2,7 +2,7 @@ * * catchup.c: sync DB cluster * - * Copyright (c) 2021, Postgres Professional + * Copyright (c) 2021-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -171,10 +171,13 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (current.backup_mode != BACKUP_MODE_FULL) { - dest_id = get_system_identifier(dest_pgdata, FIO_LOCAL_HOST, false); + ControlFileData dst_control; + get_control_file_or_back_file(dest_pgdata, FIO_LOCAL_HOST, &dst_control); + dest_id = dst_control.system_identifier; + if (source_conn_id != dest_id) - elog(ERROR, "Database identifiers mismatch: we connected to DB id %lu, but in \"%s\" we found id %lu", - source_conn_id, dest_pgdata, 
dest_id); + elog(ERROR, "Database identifiers mismatch: we connected to DB id %llu, but in \"%s\" we found id %llu", + (long long)source_conn_id, dest_pgdata, (long long)dest_id); } } @@ -184,7 +187,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, if (source_node_info->ptrack_version_num == 0) elog(ERROR, "This PostgreSQL instance does not support ptrack"); else if (source_node_info->ptrack_version_num < 200) - elog(ERROR, "ptrack extension is too old.\n" + elog(ERROR, "Ptrack extension is too old.\n" "Upgrade ptrack to version >= 2"); else if (!source_node_info->is_ptrack_enabled) elog(ERROR, "Ptrack is disabled"); @@ -203,7 +206,7 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, /* fill dest_redo.lsn and dest_redo.tli */ get_redo(dest_pgdata, FIO_LOCAL_HOST, &dest_redo); - elog(VERBOSE, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X", + elog(LOG, "source.tli = %X, dest_redo.lsn = %X/%X, dest_redo.tli = %X", current.tli, (uint32) (dest_redo.lsn >> 32), (uint32) dest_redo.lsn, dest_redo.tli); if (current.tli != 1) @@ -398,9 +401,8 @@ catchup_thread_runner(void *arg) if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during catchup"); - if (progress) - elog(INFO, "Progress: (%d/%d). Process file \"%s\"", - i + 1, n_files, file->rel_path); + elog(progress ? INFO : LOG, "Progress: (%d/%d). Process file \"%s\"", + i + 1, n_files, file->rel_path); /* construct destination filepath */ Assert(file->external_dir_num == 0); @@ -447,12 +449,12 @@ catchup_thread_runner(void *arg) if (file->write_size == BYTES_INVALID) { - elog(VERBOSE, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size); + elog(LOG, "Skipping the unchanged file: \"%s\", read %li bytes", from_fullpath, file->read_size); continue; } arguments->transfered_bytes += file->write_size; - elog(VERBOSE, "File \"%s\". Copied "INT64_FORMAT " bytes", + elog(LOG, "File \"%s\". 
Copied "INT64_FORMAT " bytes", from_fullpath, file->write_size); } @@ -507,16 +509,20 @@ catchup_multithreaded_copy(int num_threads, /* Run threads */ thread_interrupted = false; threads = (pthread_t *) palloc(sizeof(pthread_t) * num_threads); - for (i = 0; i < num_threads; i++) + if (!dry_run) { - elog(VERBOSE, "Start thread num: %i", i); - pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + for (i = 0; i < num_threads; i++) + { + elog(VERBOSE, "Start thread num: %i", i); + pthread_create(&threads[i], NULL, &catchup_thread_runner, &(threads_args[i])); + } } /* Wait threads */ for (i = 0; i < num_threads; i++) { - pthread_join(threads[i], NULL); + if (!dry_run) + pthread_join(threads[i], NULL); all_threads_successful &= threads_args[i].completed; transfered_bytes_result += threads_args[i].transfered_bytes; } @@ -603,7 +609,7 @@ filter_filelist(parray *filelist, const char *pgdata, && parray_bsearch(exclude_relative_paths_list, file->rel_path, pgPrefixCompareString)!= NULL) ) { - elog(LOG, "%s file \"%s\" excluded with --exclude-path option", logging_string, full_path); + elog(INFO, "%s file \"%s\" excluded with --exclude-path option", logging_string, full_path); file->excluded = true; } } @@ -637,6 +643,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, ssize_t transfered_walfiles_bytes = 0; char pretty_source_bytes[20]; + char dest_pg_control_fullpath[MAXPGPATH]; + char dest_pg_control_bak_fullpath[MAXPGPATH]; + source_conn = catchup_init_state(&source_node_info, source_pgdata, dest_pgdata); catchup_preflight_checks(&source_node_info, source_conn, source_pgdata, dest_pgdata); @@ -646,7 +655,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (exclude_relative_paths_list != NULL) parray_qsort(exclude_relative_paths_list, pgCompareString); - elog(LOG, "Database catchup start"); + elog(INFO, "Database catchup start"); if (current.backup_mode != BACKUP_MODE_FULL) { @@ -693,7 +702,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Call pg_start_backup function in PostgreSQL connect */ pg_start_backup(label, smooth_checkpoint, ¤t, &source_node_info, source_conn); - elog(LOG, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); + elog(INFO, "pg_start_backup START LSN %X/%X", (uint32) (current.start_lsn >> 32), (uint32) (current.start_lsn)); } /* Sanity: source cluster must be "in future" relatively to dest cluster */ @@ -706,9 +715,14 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Start stream replication */ join_path_components(dest_xlog_path, dest_pgdata, PG_XLOG_DIR); - fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); - start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, - current.start_lsn, current.tli, false); + if (!dry_run) + { + fio_mkdir(dest_xlog_path, DIR_PERMISSION, FIO_LOCAL_HOST); + start_WAL_streaming(source_conn, dest_xlog_path, &instance_config.conn_opt, + current.start_lsn, current.tli, false); + } + else + elog(INFO, "WAL streaming skipping with --dry-run option"); source_filelist = parray_new(); @@ -763,11 +777,11 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, elog(INFO, "Source PGDATA size: %s (excluded %s)", pretty_source_bytes, pretty_bytes); } - elog(LOG, "Start LSN (source): %X/%X, TLI: %X", + elog(INFO, "Start LSN (source): %X/%X, TLI: %X", (uint32) (current.start_lsn >> 32), 
(uint32) (current.start_lsn), current.tli); if (current.backup_mode != BACKUP_MODE_FULL) - elog(LOG, "LSN in destination: %X/%X, TLI: %X", + elog(INFO, "LSN in destination: %X/%X, TLI: %X", (uint32) (dest_redo.lsn >> 32), (uint32) (dest_redo.lsn), dest_redo.tli); @@ -779,9 +793,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, /* Build the page map from ptrack information */ make_pagemap_from_ptrack_2(source_filelist, source_conn, - source_node_info.ptrack_schema, - source_node_info.ptrack_version_num, - dest_redo.lsn); + source_node_info.ptrack_schema, + source_node_info.ptrack_version_num, + dest_redo.lsn); time(&end_time); elog(INFO, "Pagemap successfully extracted, time elapsed: %.0f sec", difftime(end_time, start_time)); @@ -821,8 +835,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(dirpath, dest_pgdata, file->rel_path); - elog(VERBOSE, "Create directory '%s'", dirpath); - fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); + elog(LOG, "Create directory '%s'", dirpath); + if (!dry_run) + fio_mkdir(dirpath, DIR_PERMISSION, FIO_LOCAL_HOST); } else { @@ -850,18 +865,21 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, join_path_components(to_path, dest_pgdata, file->rel_path); - elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + elog(INFO, "Create directory \"%s\" and symbolic link \"%s\"", linked_path, to_path); - /* create tablespace directory */ - if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) - elog(ERROR, "Could not create tablespace directory \"%s\": %s", - linked_path, strerror(errno)); - - /* create link to linked_path */ - if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) - elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", - linked_path, to_path, strerror(errno)); + if (!dry_run) + { + /* create tablespace directory */ + if (fio_mkdir(linked_path, file->mode, FIO_LOCAL_HOST) != 0) + elog(ERROR, "Could not create tablespace directory \"%s\": %s", + linked_path, strerror(errno)); + + /* create link to linked_path */ + if (fio_symlink(linked_path, to_path, true, FIO_LOCAL_HOST) < 0) + elog(ERROR, "Could not create symbolic link \"%s\" -> \"%s\": %s", + linked_path, to_path, strerror(errno)); + } } } @@ -923,6 +941,9 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, Assert(file->external_dir_num == 0); if (pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) redundant = true; + /* global/pg_control.pbk.bak is always keeped, because it's needed for restart failed incremental restore */ + if (pg_strcasecmp(file->rel_path, XLOG_CONTROL_BAK_FILE) == 0) + redundant = false; /* if file does not exists in destination list, then we can safely unlink it */ if (redundant) @@ -930,8 +951,11 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, char fullpath[MAXPGPATH]; join_path_components(fullpath, dest_pgdata, file->rel_path); - fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + if (!dry_run) + { + fio_delete(file->mode, fullpath, FIO_LOCAL_HOST); + } + elog(LOG, "Deleted file \"%s\"", fullpath); /* shrink dest pgdata list */ pgFileFree(file); @@ -951,6 +975,28 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (dest_filelist) parray_qsort(dest_filelist, pgFileCompareRelPathWithExternal); + join_path_components(dest_pg_control_fullpath, dest_pgdata, 
XLOG_CONTROL_FILE); + join_path_components(dest_pg_control_bak_fullpath, dest_pgdata, XLOG_CONTROL_BAK_FILE); + /* + * rename (if it exist) dest control file before restoring + * if it doesn't exist, that mean, that we already restoring in a previously failed + * pgdata, where XLOG_CONTROL_BAK_FILE exist + */ + if (current.backup_mode != BACKUP_MODE_FULL && !dry_run) + { + if (!fio_access(dest_pg_control_fullpath, F_OK, FIO_LOCAL_HOST)) + { + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true,0, FIO_BACKUP_HOST); + + if(!fio_access(dest_pg_control_bak_fullpath, F_OK, FIO_LOCAL_HOST)) + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + fio_rename(dest_pg_control_fullpath, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } + } + /* run copy threads */ elog(INFO, "Start transferring data files"); time(&start_time); @@ -961,7 +1007,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, catchup_isok = transfered_datafiles_bytes != -1; /* at last copy control file */ - if (catchup_isok) + if (catchup_isok && !dry_run) { char from_fullpath[MAXPGPATH]; char to_fullpath[MAXPGPATH]; @@ -970,9 +1016,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, copy_pgcontrol_file(from_fullpath, FIO_DB_HOST, to_fullpath, FIO_LOCAL_HOST, source_pg_control_file); transfered_datafiles_bytes += source_pg_control_file->size; + + /* Now backup control file can be deled */ + if (current.backup_mode != BACKUP_MODE_FULL && !fio_access(dest_pg_control_bak_fullpath, F_OK, FIO_LOCAL_HOST)){ + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true,0, FIO_BACKUP_HOST); + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } } - if (!catchup_isok) + if (!catchup_isok && !dry_run) { char pretty_time[20]; char pretty_transfered_data_bytes[20]; @@ -1010,14 +1065,18 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_free(stop_backup_query_text); } - wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); + if (!dry_run) + wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); #if PG_VERSION_NUM >= 90600 /* Write backup_label */ Assert(stop_backup_result.backup_label_content != NULL); - pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, - NULL); + if (!dry_run) + { + pg_stop_backup_write_file_helper(dest_pgdata, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + NULL); + } free(stop_backup_result.backup_label_content); stop_backup_result.backup_label_content = NULL; stop_backup_result.backup_label_content_len = 0; @@ -1040,6 +1099,7 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, #endif /* wait for end of wal streaming and calculate wal size transfered */ + if (!dry_run) { parray *wal_files_list = NULL; wal_files_list = parray_new(); @@ -1091,17 +1151,17 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* Sync all copied files unless '--no-sync' flag is used */ - if (sync_dest_files) + if (sync_dest_files && !dry_run) catchup_sync_destination_files(dest_pgdata, FIO_LOCAL_HOST, source_filelist, 
source_pg_control_file); else elog(WARNING, "Files are not synced to disk"); /* Cleanup */ - if (dest_filelist) + if (dest_filelist && !dry_run) { parray_walk(dest_filelist, pgFileFree); - parray_free(dest_filelist); } + parray_free(dest_filelist); parray_walk(source_filelist, pgFileFree); parray_free(source_filelist); pgFileFree(source_pg_control_file); diff --git a/src/checkdb.c b/src/checkdb.c index 177fc3cc7..2a7d4e9eb 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -145,7 +145,7 @@ check_files(void *arg) /* check for interrupt */ if (interrupted || thread_interrupted) - elog(ERROR, "interrupted during checkdb"); + elog(ERROR, "Interrupted during checkdb"); /* No need to check directories */ if (S_ISDIR(file->mode)) @@ -461,7 +461,9 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, "LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid " "LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid " "LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid " - "WHERE am.amname='btree' AND cls.relpersistence != 't' " + "WHERE am.amname='btree' " + "AND cls.relpersistence != 't' " + "AND cls.relkind != 'I' " "ORDER BY nmspc.nspname DESC", 0, NULL); } @@ -473,8 +475,10 @@ get_index_list(const char *dbname, bool first_db_with_amcheck, "LEFT JOIN pg_catalog.pg_class cls ON idx.indexrelid=cls.oid " "LEFT JOIN pg_catalog.pg_namespace nmspc ON cls.relnamespace=nmspc.oid " "LEFT JOIN pg_catalog.pg_am am ON cls.relam=am.oid " - "WHERE am.amname='btree' AND cls.relpersistence != 't' AND " - "(cls.reltablespace IN " + "WHERE am.amname='btree' " + "AND cls.relpersistence != 't' " + "AND cls.relkind != 'I' " + "AND (cls.reltablespace IN " "(SELECT oid from pg_catalog.pg_tablespace where spcname <> 'pg_global') " "OR cls.reltablespace = 0) " "ORDER BY nmspc.nspname DESC", @@ -746,7 +750,7 @@ do_checkdb(bool need_amcheck, if (!skip_block_validation) { if (!pgdata) - elog(ERROR, "required parameter not specified: PGDATA " + elog(ERROR, "Required parameter not specified: PGDATA " "(-D, --pgdata)"); /* get node info */ diff --git a/src/configure.c b/src/configure.c index 9ffe2d7a7..964548343 100644 --- a/src/configure.c +++ b/src/configure.c @@ -17,10 +17,14 @@ static void assign_log_level_console(ConfigOption *opt, const char *arg); static void assign_log_level_file(ConfigOption *opt, const char *arg); +static void assign_log_format_console(ConfigOption *opt, const char *arg); +static void assign_log_format_file(ConfigOption *opt, const char *arg); static void assign_compress_alg(ConfigOption *opt, const char *arg); static char *get_log_level_console(ConfigOption *opt); static char *get_log_level_file(ConfigOption *opt); +static char *get_log_format_console(ConfigOption *opt); +static char *get_log_format_file(ConfigOption *opt); static char *get_compress_alg(ConfigOption *opt); static void show_configure_start(void); @@ -49,7 +53,7 @@ ConfigOption instance_options[] = /* Instance options */ { 's', 'D', "pgdata", - &instance_config.pgdata, SOURCE_CMD, 0, + &instance_config.pgdata, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, { @@ -66,49 +70,49 @@ ConfigOption instance_options[] = #endif { 's', 'E', "external-dirs", - &instance_config.external_dir_str, SOURCE_CMD, 0, + &instance_config.external_dir_str, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, /* Connection options */ { 's', 'd', "pgdatabase", - &instance_config.conn_opt.pgdatabase, SOURCE_CMD, 0, + &instance_config.conn_opt.pgdatabase, SOURCE_CMD, SOURCE_DEFAULT, 
OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'h', "pghost", - &instance_config.conn_opt.pghost, SOURCE_CMD, 0, + &instance_config.conn_opt.pghost, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'p', "pgport", - &instance_config.conn_opt.pgport, SOURCE_CMD, 0, + &instance_config.conn_opt.pgport, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'U', "pguser", - &instance_config.conn_opt.pguser, SOURCE_CMD, 0, + &instance_config.conn_opt.pguser, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, /* Replica options */ { 's', 202, "master-db", - &instance_config.master_conn_opt.pgdatabase, SOURCE_CMD, 0, + &instance_config.master_conn_opt.pgdatabase, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 203, "master-host", - &instance_config.master_conn_opt.pghost, SOURCE_CMD, 0, + &instance_config.master_conn_opt.pghost, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 204, "master-port", - &instance_config.master_conn_opt.pgport, SOURCE_CMD, 0, + &instance_config.master_conn_opt.pgport, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 205, "master-user", - &instance_config.master_conn_opt.pguser, SOURCE_CMD, 0, + &instance_config.master_conn_opt.pguser, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { @@ -124,17 +128,17 @@ ConfigOption instance_options[] = }, { 's', 208, "archive-host", - &instance_config.archive.host, SOURCE_CMD, 0, + &instance_config.archive.host, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 209, "archive-port", - &instance_config.archive.port, SOURCE_CMD, 0, + &instance_config.archive.port, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 210, "archive-user", - &instance_config.archive.user, SOURCE_CMD, 0, + &instance_config.archive.user, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { @@ -145,100 +149,110 @@ ConfigOption instance_options[] = /* Logging options */ { 'f', 212, "log-level-console", - assign_log_level_console, SOURCE_CMD, 0, + assign_log_level_console, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, get_log_level_console }, { 'f', 213, "log-level-file", - assign_log_level_file, SOURCE_CMD, 0, + assign_log_level_file, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, get_log_level_file }, { - 's', 214, "log-filename", - &instance_config.logger.log_filename, SOURCE_CMD, 0, + 'f', 214, "log-format-console", + assign_log_format_console, SOURCE_CMD_STRICT, SOURCE_DEFAULT, + OPTION_LOG_GROUP, 0, get_log_format_console + }, + { + 'f', 215, "log-format-file", + assign_log_format_file, SOURCE_CMD, SOURCE_DEFAULT, + OPTION_LOG_GROUP, 0, get_log_format_file + }, + { + 's', 216, "log-filename", + &instance_config.logger.log_filename, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 215, "error-log-filename", - &instance_config.logger.error_log_filename, SOURCE_CMD, 0, + 's', 217, "error-log-filename", + &instance_config.logger.error_log_filename, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 216, "log-directory", - &instance_config.logger.log_directory, SOURCE_CMD, 0, + 's', 218, "log-directory", + &instance_config.logger.log_directory, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { - 'U', 217, "log-rotation-size", + 'U', 219, "log-rotation-size", &instance_config.logger.log_rotation_size, SOURCE_CMD, 
SOURCE_DEFAULT, OPTION_LOG_GROUP, OPTION_UNIT_KB, option_get_value }, { - 'U', 218, "log-rotation-age", + 'U', 220, "log-rotation-age", &instance_config.logger.log_rotation_age, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, OPTION_UNIT_MS, option_get_value }, /* Retention options */ { - 'u', 219, "retention-redundancy", - &instance_config.retention_redundancy, SOURCE_CMD, 0, + 'u', 221, "retention-redundancy", + &instance_config.retention_redundancy, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, { - 'u', 220, "retention-window", - &instance_config.retention_window, SOURCE_CMD, 0, + 'u', 222, "retention-window", + &instance_config.retention_window, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, { - 'u', 221, "wal-depth", - &instance_config.wal_depth, SOURCE_CMD, 0, + 'u', 223, "wal-depth", + &instance_config.wal_depth, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, /* Compression options */ { - 'f', 222, "compress-algorithm", - assign_compress_alg, SOURCE_CMD, 0, + 'f', 224, "compress-algorithm", + assign_compress_alg, SOURCE_CMD, SOURCE_DEFAULT, OPTION_COMPRESS_GROUP, 0, get_compress_alg }, { - 'u', 223, "compress-level", - &instance_config.compress_level, SOURCE_CMD, 0, + 'u', 225, "compress-level", + &instance_config.compress_level, SOURCE_CMD, SOURCE_DEFAULT, OPTION_COMPRESS_GROUP, 0, option_get_value }, /* Remote backup options */ { - 's', 224, "remote-proto", - &instance_config.remote.proto, SOURCE_CMD, 0, + 's', 226, "remote-proto", + &instance_config.remote.proto, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 225, "remote-host", - &instance_config.remote.host, SOURCE_CMD, 0, + 's', 227, "remote-host", + &instance_config.remote.host, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 226, "remote-port", - &instance_config.remote.port, SOURCE_CMD, 0, + 's', 228, "remote-port", + &instance_config.remote.port, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 227, "remote-path", - &instance_config.remote.path, SOURCE_CMD, 0, + 's', 229, "remote-path", + &instance_config.remote.path, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 228, "remote-user", - &instance_config.remote.user, SOURCE_CMD, 0, + 's', 230, "remote-user", + &instance_config.remote.user, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 229, "ssh-options", - &instance_config.remote.ssh_options, SOURCE_CMD, 0, + 's', 231, "ssh-options", + &instance_config.remote.ssh_options, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 230, "ssh-config", - &instance_config.remote.ssh_config, SOURCE_CMD, 0, + 's', 232, "ssh-config", + &instance_config.remote.ssh_config, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 0 } @@ -255,7 +269,7 @@ static const char *current_group = NULL; * Show configure options including default values. 
*/ void -do_show_config(void) +do_show_config(bool show_base_units) { int i; @@ -263,10 +277,13 @@ do_show_config(void) for (i = 0; instance_options[i].type; i++) { + if (show_base_units && strchr("bBiIuU", instance_options[i].type) && instance_options[i].get_value == *option_get_value) + instance_options[i].flags |= GET_VAL_IN_BASE_UNITS; /* Set flag */ if (show_format == SHOW_PLAIN) show_configure_plain(&instance_options[i]); else show_configure_json(&instance_options[i]); + instance_options[i].flags &= ~(GET_VAL_IN_BASE_UNITS); /* Reset flag. It was resetted in option_get_value(). Probably this reset isn't needed */ } show_configure_end(); @@ -388,6 +405,8 @@ readInstanceConfigFile(InstanceState *instanceState) InstanceConfig *instance = pgut_new(InstanceConfig); char *log_level_console = NULL; char *log_level_file = NULL; + char *log_format_console = NULL; + char *log_format_file = NULL; char *compress_alg = NULL; int parsed_options; @@ -396,7 +415,7 @@ readInstanceConfigFile(InstanceState *instanceState) /* Instance options */ { 's', 'D', "pgdata", - &instance->pgdata, SOURCE_CMD, 0, + &instance->pgdata, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, { @@ -413,49 +432,49 @@ readInstanceConfigFile(InstanceState *instanceState) #endif { 's', 'E', "external-dirs", - &instance->external_dir_str, SOURCE_CMD, 0, + &instance->external_dir_str, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, /* Connection options */ { 's', 'd', "pgdatabase", - &instance->conn_opt.pgdatabase, SOURCE_CMD, 0, + &instance->conn_opt.pgdatabase, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'h', "pghost", - &instance->conn_opt.pghost, SOURCE_CMD, 0, + &instance->conn_opt.pghost, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'p', "pgport", - &instance->conn_opt.pgport, SOURCE_CMD, 0, + &instance->conn_opt.pgport, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, { 's', 'U', "pguser", - &instance->conn_opt.pguser, SOURCE_CMD, 0, + &instance->conn_opt.pguser, SOURCE_CMD, SOURCE_DEFAULT, OPTION_CONN_GROUP, 0, option_get_value }, /* Replica options */ { 's', 202, "master-db", - &instance->master_conn_opt.pgdatabase, SOURCE_CMD, 0, + &instance->master_conn_opt.pgdatabase, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 203, "master-host", - &instance->master_conn_opt.pghost, SOURCE_CMD, 0, + &instance->master_conn_opt.pghost, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 204, "master-port", - &instance->master_conn_opt.pgport, SOURCE_CMD, 0, + &instance->master_conn_opt.pgport, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { 's', 205, "master-user", - &instance->master_conn_opt.pguser, SOURCE_CMD, 0, + &instance->master_conn_opt.pguser, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REPLICA_GROUP, 0, option_get_value }, { @@ -471,129 +490,139 @@ readInstanceConfigFile(InstanceState *instanceState) }, { 's', 208, "archive-host", - &instance_config.archive.host, SOURCE_CMD, 0, + &instance_config.archive.host, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 209, "archive-port", - &instance_config.archive.port, SOURCE_CMD, 0, + &instance_config.archive.port, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 210, "archive-user", - &instance_config.archive.user, SOURCE_CMD, 0, + &instance_config.archive.user, SOURCE_CMD, SOURCE_DEFAULT, 
OPTION_ARCHIVE_GROUP, 0, option_get_value }, { 's', 211, "restore-command", - &instance->restore_command, SOURCE_CMD, 0, + &instance->restore_command, SOURCE_CMD, SOURCE_DEFAULT, OPTION_ARCHIVE_GROUP, 0, option_get_value }, /* Instance options */ { 's', 'D', "pgdata", - &instance->pgdata, SOURCE_CMD, 0, + &instance->pgdata, SOURCE_CMD, SOURCE_DEFAULT, OPTION_INSTANCE_GROUP, 0, option_get_value }, /* Logging options */ { 's', 212, "log-level-console", - &log_level_console, SOURCE_CMD, 0, + &log_level_console, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { 's', 213, "log-level-file", - &log_level_file, SOURCE_CMD, 0, + &log_level_file, SOURCE_CMD, SOURCE_DEFAULT, + OPTION_LOG_GROUP, 0, option_get_value + }, + { + 's', 214, "log-format-console", + &log_format_console, SOURCE_CMD_STRICT, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 214, "log-filename", - &instance->logger.log_filename, SOURCE_CMD, 0, + 's', 215, "log-format-file", + &log_format_file, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 215, "error-log-filename", - &instance->logger.error_log_filename, SOURCE_CMD, 0, + 's', 216, "log-filename", + &instance->logger.log_filename, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { - 's', 216, "log-directory", - &instance->logger.log_directory, SOURCE_CMD, 0, + 's', 217, "error-log-filename", + &instance->logger.error_log_filename, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { - 'U', 217, "log-rotation-size", + 's', 218, "log-directory", + &instance->logger.log_directory, SOURCE_CMD, SOURCE_DEFAULT, + OPTION_LOG_GROUP, 0, option_get_value + }, + { + 'U', 219, "log-rotation-size", &instance->logger.log_rotation_size, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, OPTION_UNIT_KB, option_get_value }, { - 'U', 218, "log-rotation-age", + 'U', 220, "log-rotation-age", &instance->logger.log_rotation_age, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, OPTION_UNIT_MS, option_get_value }, /* Retention options */ { - 'u', 219, "retention-redundancy", - &instance->retention_redundancy, SOURCE_CMD, 0, + 'u', 221, "retention-redundancy", + &instance->retention_redundancy, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, { - 'u', 220, "retention-window", - &instance->retention_window, SOURCE_CMD, 0, + 'u', 222, "retention-window", + &instance->retention_window, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, { - 'u', 221, "wal-depth", - &instance->wal_depth, SOURCE_CMD, 0, + 'u', 223, "wal-depth", + &instance->wal_depth, SOURCE_CMD, SOURCE_DEFAULT, OPTION_RETENTION_GROUP, 0, option_get_value }, /* Compression options */ { - 's', 222, "compress-algorithm", - &compress_alg, SOURCE_CMD, 0, + 's', 224, "compress-algorithm", + &compress_alg, SOURCE_CMD, SOURCE_DEFAULT, OPTION_LOG_GROUP, 0, option_get_value }, { - 'u', 223, "compress-level", - &instance->compress_level, SOURCE_CMD, 0, + 'u', 225, "compress-level", + &instance->compress_level, SOURCE_CMD, SOURCE_DEFAULT, OPTION_COMPRESS_GROUP, 0, option_get_value }, /* Remote backup options */ { - 's', 224, "remote-proto", - &instance->remote.proto, SOURCE_CMD, 0, + 's', 226, "remote-proto", + &instance->remote.proto, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 225, "remote-host", - &instance->remote.host, SOURCE_CMD, 0, + 's', 227, "remote-host", + &instance->remote.host, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 
226, "remote-port", - &instance->remote.port, SOURCE_CMD, 0, + 's', 228, "remote-port", + &instance->remote.port, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 227, "remote-path", - &instance->remote.path, SOURCE_CMD, 0, + 's', 229, "remote-path", + &instance->remote.path, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 228, "remote-user", - &instance->remote.user, SOURCE_CMD, 0, + 's', 230, "remote-user", + &instance->remote.user, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 229, "ssh-options", - &instance->remote.ssh_options, SOURCE_CMD, 0, + 's', 231, "ssh-options", + &instance->remote.ssh_options, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { - 's', 230, "ssh-config", - &instance->remote.ssh_config, SOURCE_CMD, 0, + 's', 232, "ssh-config", + &instance->remote.ssh_config, SOURCE_CMD, SOURCE_DEFAULT, OPTION_REMOTE_GROUP, 0, option_get_value }, { 0 } @@ -625,6 +654,12 @@ readInstanceConfigFile(InstanceState *instanceState) if (log_level_file) instance->logger.log_level_file = parse_log_level(log_level_file); + if (log_format_console) + instance->logger.log_format_console = parse_log_format(log_format_console); + + if (log_format_file) + instance->logger.log_format_file = parse_log_format(log_format_file); + if (compress_alg) instance->compress_alg = parse_compress_alg(compress_alg); @@ -649,6 +684,18 @@ assign_log_level_file(ConfigOption *opt, const char *arg) instance_config.logger.log_level_file = parse_log_level(arg); } +static void +assign_log_format_console(ConfigOption *opt, const char *arg) +{ + instance_config.logger.log_format_console = parse_log_format(arg); +} + +static void +assign_log_format_file(ConfigOption *opt, const char *arg) +{ + instance_config.logger.log_format_file = parse_log_format(arg); +} + static void assign_compress_alg(ConfigOption *opt, const char *arg) { @@ -667,6 +714,18 @@ get_log_level_file(ConfigOption *opt) return pstrdup(deparse_log_level(instance_config.logger.log_level_file)); } +static char * +get_log_format_console(ConfigOption *opt) +{ + return pstrdup(deparse_log_format(instance_config.logger.log_format_console)); +} + +static char * +get_log_format_file(ConfigOption *opt) +{ + return pstrdup(deparse_log_format(instance_config.logger.log_format_file)); +} + static char * get_compress_alg(ConfigOption *opt) { @@ -745,6 +804,6 @@ show_configure_json(ConfigOption *opt) return; json_add_value(&show_buf, opt->lname, value, json_level, - true); + !(opt->flags & GET_VAL_IN_BASE_UNITS)); pfree(value); } diff --git a/src/data.c b/src/data.c index f02e3fd14..1a9616bae 100644 --- a/src/data.c +++ b/src/data.c @@ -3,7 +3,7 @@ * data.c: utils to parse and backup data pages * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -142,7 +142,7 @@ page_may_be_compressed(Page page, CompressAlg alg, uint32 backup_version) phdr = (PageHeader) page; /* First check if page header is valid (it seems to be fast enough check) */ - if (!(PageGetPageSize(phdr) == BLCKSZ && + if (!(PageGetPageSize(page) == BLCKSZ && // PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION && (phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 && phdr->pd_lower >= SizeOfPageHeaderData && @@ -181,7 +181,7 @@ parse_page(Page page, XLogRecPtr 
*lsn) /* Get lsn from page header */ *lsn = PageXLogRecPtrGet(phdr->pd_lsn); - if (PageGetPageSize(phdr) == BLCKSZ && + if (PageGetPageSize(page) == BLCKSZ && // PageGetPageLayoutVersion(phdr) == PG_PAGE_LAYOUT_VERSION && (phdr->pd_flags & ~PD_VALID_FLAG_BITS) == 0 && phdr->pd_lower >= SizeOfPageHeaderData && @@ -203,10 +203,10 @@ get_header_errormsg(Page page, char **errormsg) PageHeader phdr = (PageHeader) page; *errormsg = pgut_malloc(ERRMSG_MAX_LEN); - if (PageGetPageSize(phdr) != BLCKSZ) + if (PageGetPageSize(page) != BLCKSZ) snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " "page size %lu is not equal to block size %u", - PageGetPageSize(phdr), BLCKSZ); + PageGetPageSize(page), BLCKSZ); else if (phdr->pd_lower < SizeOfPageHeaderData) snprintf(*errormsg, ERRMSG_MAX_LEN, "page header invalid, " @@ -349,6 +349,8 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn, Assert(false); } } + /* avoid re-reading once buffered data, flushing on further attempts, see PBCKP-150 */ + fflush(in); } /* @@ -694,7 +696,7 @@ catchup_data_file(pgFile *file, const char *from_fullpath, const char *to_fullpa use_pagemap = true; if (use_pagemap) - elog(VERBOSE, "Using pagemap for file \"%s\"", file->rel_path); + elog(LOG, "Using pagemap for file \"%s\"", file->rel_path); /* Remote mode */ if (fio_is_remote(FIO_DB_HOST)) @@ -793,19 +795,28 @@ backup_non_data_file(pgFile *file, pgFile *prev_file, } /* - * If nonedata file exists in previous backup + * If non-data file exists in previous backup * and its mtime is less than parent backup start time ... */ if ((pg_strcasecmp(file->name, RELMAPPER_FILENAME) != 0) && (prev_file && file->exists_in_prev && + file->size == prev_file->size && file->mtime <= parent_backup_time)) { - - file->crc = fio_get_crc32(from_fullpath, FIO_DB_HOST, false); + /* + * file could be deleted under our feets. + * But then backup_non_data_file_internal will handle it safely + */ + if (file->forkName != cfm) + file->crc = fio_get_crc32(from_fullpath, FIO_DB_HOST, false, true); + else + file->crc = fio_get_crc32_truncated(from_fullpath, FIO_DB_HOST, true); /* ...and checksum is the same... */ if (EQ_TRADITIONAL_CRC32(file->crc, prev_file->crc)) { file->write_size = BYTES_INVALID; + /* get full size from previous backup for unchanged file */ + file->uncompressed_size = prev_file->uncompressed_size; return; /* ...skip copying file. 
*/ } } @@ -1195,7 +1206,7 @@ restore_data_file_internal(FILE *in, FILE *out, pgFile *file, uint32 backup_vers datapagemap_add(map, blknum); } - elog(VERBOSE, "Copied file \"%s\": %lu bytes", from_fullpath, write_len); + elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, write_len); return write_len; } @@ -1218,7 +1229,7 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, /* check for interrupt */ if (interrupted || thread_interrupted) - elog(ERROR, "Interrupted during nonedata file restore"); + elog(ERROR, "Interrupted during non-data file restore"); read_len = fread(buf, 1, STDIO_BUFSIZE, in); @@ -1239,7 +1250,7 @@ restore_non_data_file_internal(FILE *in, FILE *out, pgFile *file, pg_free(buf); - elog(VERBOSE, "Copied file \"%s\": %lu bytes", from_fullpath, file->write_size); + elog(LOG, "Copied file \"%s\": %lu bytes", from_fullpath, file->write_size); } size_t @@ -1284,8 +1295,8 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, */ if (!tmp_file) { - elog(ERROR, "Failed to locate nonedata file \"%s\" in backup %s", - dest_file->rel_path, base36enc(tmp_backup->start_time)); + elog(ERROR, "Failed to locate non-data file \"%s\" in backup %s", + dest_file->rel_path, backup_id_of(tmp_backup)); continue; } @@ -1309,27 +1320,32 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, /* sanity */ if (!tmp_backup) - elog(ERROR, "Failed to locate a backup containing full copy of nonedata file \"%s\"", + elog(ERROR, "Failed to locate a backup containing full copy of non-data file \"%s\"", to_fullpath); if (!tmp_file) - elog(ERROR, "Failed to locate a full copy of nonedata file \"%s\"", to_fullpath); + elog(ERROR, "Failed to locate a full copy of non-data file \"%s\"", to_fullpath); if (tmp_file->write_size <= 0) - elog(ERROR, "Full copy of nonedata file has invalid size: %li. " + elog(ERROR, "Full copy of non-data file has invalid size: %li. 
" "Metadata corruption in backup %s in file: \"%s\"", - tmp_file->write_size, base36enc(tmp_backup->start_time), + tmp_file->write_size, backup_id_of(tmp_backup), to_fullpath); /* incremental restore */ if (already_exists) { /* compare checksums of already existing file and backup file */ - pg_crc32 file_crc = fio_get_crc32(to_fullpath, FIO_DB_HOST, false); + pg_crc32 file_crc; + if (tmp_file->forkName == cfm && + tmp_file->uncompressed_size > tmp_file->write_size) + file_crc = fio_get_crc32_truncated(to_fullpath, FIO_DB_HOST, false); + else + file_crc = fio_get_crc32(to_fullpath, FIO_DB_HOST, false, false); if (file_crc == tmp_file->crc) { - elog(VERBOSE, "Already existing nonedata file \"%s\" has the same checksum, skip restore", + elog(LOG, "Already existing non-data file \"%s\" has the same checksum, skip restore", to_fullpath); return 0; } @@ -1357,7 +1373,7 @@ restore_non_data_file(parray *parent_chain, pgBackup *dest_backup, elog(ERROR, "Cannot open backup file \"%s\": %s", from_fullpath, strerror(errno)); - /* disable stdio buffering for nonedata files */ + /* disable stdio buffering for non-data files */ setvbuf(in, NULL, _IONBF, BUFSIZ); /* do actual work */ @@ -1382,10 +1398,12 @@ backup_non_data_file_internal(const char *from_fullpath, const char *to_fullpath, pgFile *file, bool missing_ok) { - FILE *in = NULL; FILE *out = NULL; - ssize_t read_len = 0; - char *buf = NULL; + char *errmsg = NULL; + int rc; + bool cut_zero_tail; + + cut_zero_tail = file->forkName == cfm; INIT_FILE_CRC32(true, file->crc); @@ -1407,107 +1425,44 @@ backup_non_data_file_internal(const char *from_fullpath, /* backup remote file */ if (fio_is_remote(FIO_DB_HOST)) - { - char *errmsg = NULL; - int rc = fio_send_file(from_fullpath, to_fullpath, out, file, &errmsg); + rc = fio_send_file(from_fullpath, out, cut_zero_tail, file, &errmsg); + else + rc = fio_send_file_local(from_fullpath, out, cut_zero_tail, file, &errmsg); - /* handle errors */ - if (rc == FILE_MISSING) - { - /* maybe deleted, it's not error in case of backup */ - if (missing_ok) - { - elog(LOG, "File \"%s\" is not found", from_fullpath); - file->write_size = FILE_NOT_FOUND; - goto cleanup; - } - else - elog(ERROR, "File \"%s\" is not found", from_fullpath); - } - else if (rc == WRITE_FAILED) - elog(ERROR, "Cannot write to \"%s\": %s", to_fullpath, strerror(errno)); - else if (rc != SEND_OK) + /* handle errors */ + if (rc == FILE_MISSING) + { + /* maybe deleted, it's not error in case of backup */ + if (missing_ok) { - if (errmsg) - elog(ERROR, "%s", errmsg); - else - elog(ERROR, "Cannot access remote file \"%s\"", from_fullpath); + elog(LOG, "File \"%s\" is not found", from_fullpath); + file->write_size = FILE_NOT_FOUND; + goto cleanup; } - - pg_free(errmsg); + else + elog(ERROR, "File \"%s\" is not found", from_fullpath); } - /* backup local file */ - else + else if (rc == WRITE_FAILED) + elog(ERROR, "Cannot write to \"%s\": %s", to_fullpath, strerror(errno)); + else if (rc != SEND_OK) { - /* open source file for read */ - in = fopen(from_fullpath, PG_BINARY_R); - if (in == NULL) - { - /* maybe deleted, it's not error in case of backup */ - if (errno == ENOENT) - { - if (missing_ok) - { - elog(LOG, "File \"%s\" is not found", from_fullpath); - file->write_size = FILE_NOT_FOUND; - goto cleanup; - } - else - elog(ERROR, "File \"%s\" is not found", from_fullpath); - } - - elog(ERROR, "Cannot open file \"%s\": %s", from_fullpath, - strerror(errno)); - } - - /* disable stdio buffering for local input/output files to avoid triple buffering */ - 
setvbuf(in, NULL, _IONBF, BUFSIZ); - setvbuf(out, NULL, _IONBF, BUFSIZ); - - /* allocate 64kB buffer */ - buf = pgut_malloc(CHUNK_SIZE); - - /* copy content and calc CRC */ - for (;;) - { - read_len = fread(buf, 1, CHUNK_SIZE, in); - - if (ferror(in)) - elog(ERROR, "Cannot read from file \"%s\": %s", - from_fullpath, strerror(errno)); - - if (read_len > 0) - { - if (fwrite(buf, 1, read_len, out) != read_len) - elog(ERROR, "Cannot write to file \"%s\": %s", to_fullpath, - strerror(errno)); - - /* update CRC */ - COMP_FILE_CRC32(true, file->crc, buf, read_len); - file->read_size += read_len; - } - - if (feof(in)) - break; - } + if (errmsg) + elog(ERROR, "%s", errmsg); + else + elog(ERROR, "Cannot access remote file \"%s\"", from_fullpath); } - file->write_size = (int64) file->read_size; - - if (file->write_size > 0) - file->uncompressed_size = file->write_size; + file->uncompressed_size = file->read_size; cleanup: + if (errmsg != NULL) + pg_free(errmsg); + /* finish CRC calculation and store into pgFile */ FIN_FILE_CRC32(true, file->crc); - if (in && fclose(in)) - elog(ERROR, "Cannot close the file \"%s\": %s", from_fullpath, strerror(errno)); - if (out && fclose(out)) elog(ERROR, "Cannot close the file \"%s\": %s", to_fullpath, strerror(errno)); - - pg_free(buf); } /* @@ -1681,7 +1636,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, int n_hdr = -1; off_t cur_pos_in = 0; - elog(VERBOSE, "Validate relation blocks for file \"%s\"", fullpath); + elog(LOG, "Validate relation blocks for file \"%s\"", fullpath); /* should not be possible */ Assert(!(backup_version >= 20400 && file->n_headers <= 0)); @@ -1740,7 +1695,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, elog(ERROR, "Cannot seek block %u of \"%s\": %s", blknum, fullpath, strerror(errno)); else - elog(INFO, "Seek to %u", headers[n_hdr].pos); + elog(VERBOSE, "Seek to %u", headers[n_hdr].pos); cur_pos_in = headers[n_hdr].pos; } @@ -1764,7 +1719,7 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, /* backward compatibility kludge TODO: remove in 3.0 */ if (compressed_size == PageIsTruncated) { - elog(INFO, "Block %u of \"%s\" is truncated", + elog(VERBOSE, "Block %u of \"%s\" is truncated", blknum, fullpath); continue; } @@ -1835,10 +1790,10 @@ validate_file_pages(pgFile *file, const char *fullpath, XLogRecPtr stop_lsn, switch (rc) { case PAGE_IS_NOT_FOUND: - elog(LOG, "File \"%s\", block %u, page is NULL", file->rel_path, blknum); + elog(VERBOSE, "File \"%s\", block %u, page is NULL", file->rel_path, blknum); break; case PAGE_IS_ZEROED: - elog(LOG, "File: %s blknum %u, empty zeroed page", file->rel_path, blknum); + elog(VERBOSE, "File: %s blknum %u, empty zeroed page", file->rel_path, blknum); break; case PAGE_HEADER_IS_INVALID: elog(WARNING, "Page header is looking insane: %s, block %i", file->rel_path, blknum); @@ -2030,10 +1985,10 @@ get_page_header(FILE *in, const char *fullpath, BackupPageHeader* bph, return false; /* EOF found */ else if (read_len != 0 && feof(in)) elog(ERROR, - "Odd size page found at offset %lu of \"%s\"", + "Odd size page found at offset %ld of \"%s\"", ftello(in), fullpath); else - elog(ERROR, "Cannot read header at offset %lu of \"%s\": %s", + elog(ERROR, "Cannot read header at offset %ld of \"%s\": %s", ftello(in), fullpath, strerror(errno)); } @@ -2321,7 +2276,7 @@ copy_pages(const char *to_fullpath, const char *from_fullpath, elog(ERROR, "Cannot seek to end of file position in destination file \"%s\": %s", 
to_fullpath, strerror(errno)); { - size_t pos = ftell(out); + long pos = ftell(out); if (pos < 0) elog(ERROR, "Cannot get position in destination file \"%s\": %s", @@ -2535,7 +2490,10 @@ write_page_headers(BackupPageHeader2 *headers, pgFile *file, HeaderMap *hdr_map, file->rel_path, file->hdr_off, z_len, file->hdr_crc); if (fwrite(zheaders, 1, z_len, hdr_map->fp) != z_len) + { + pthread_mutex_unlock(&(hdr_map->mutex)); elog(ERROR, "Cannot write to file \"%s\": %s", map_path, strerror(errno)); + } file->hdr_size = z_len; /* save the length of compressed headers */ hdr_map->offset += z_len; /* update current offset in map */ diff --git a/src/datapagemap.h b/src/datapagemap.h index 6af54713b..6ad7a6204 100644 --- a/src/datapagemap.h +++ b/src/datapagemap.h @@ -9,7 +9,12 @@ #ifndef DATAPAGEMAP_H #define DATAPAGEMAP_H +#if PG_VERSION_NUM < 160000 #include "storage/relfilenode.h" +#else +#include "storage/relfilelocator.h" +#define RelFileNode RelFileLocator +#endif #include "storage/block.h" diff --git a/src/delete.c b/src/delete.c index 6c70ff81e..f48ecc95f 100644 --- a/src/delete.c +++ b/src/delete.c @@ -36,7 +36,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) parray *backup_list, *delete_list; pgBackup *target_backup = NULL; - size_t size_to_delete = 0; + int64 size_to_delete = 0; char size_to_delete_pretty[20]; /* Get complete list of backups */ @@ -71,7 +71,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) parray_append(delete_list, backup); elog(LOG, "Backup %s %s be deleted", - base36enc(backup->start_time), dry_run? "can":"will"); + backup_id_of(backup), dry_run? "can":"will"); size_to_delete += backup->data_bytes; if (backup->stream) @@ -84,7 +84,7 @@ do_delete(InstanceState *instanceState, time_t backup_id) { pretty_size(size_to_delete, size_to_delete_pretty, lengthof(size_to_delete_pretty)); elog(INFO, "Resident data size to free by delete of backup %s : %s", - base36enc(target_backup->start_time), size_to_delete_pretty); + backup_id_of(target_backup), size_to_delete_pretty); } if (!dry_run) @@ -158,7 +158,13 @@ void do_retention(InstanceState *instanceState, bool no_validate, bool no_sync) /* Retention is disabled but we still can cleanup wal */ elog(WARNING, "Retention policy is not set"); if (!delete_wal) + { + parray_walk(backup_list, pgBackupFree); + parray_free(backup_list); + parray_free(to_keep_list); + parray_free(to_purge_list); return; + } } else /* At least one retention policy is active */ @@ -321,12 +327,12 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg time2iso(expire_timestamp, lengthof(expire_timestamp), backup->expire_time, false); elog(LOG, "Backup %s is pinned until '%s', retain", - base36enc(backup->start_time), expire_timestamp); + backup_id_of(backup), expire_timestamp); continue; } /* Add backup to purge_list */ - elog(VERBOSE, "Mark backup %s for purge.", base36enc(backup->start_time)); + elog(VERBOSE, "Mark backup %s for purge.", backup_id_of(backup)); parray_append(to_purge_list, backup); continue; } @@ -406,7 +412,7 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg /* TODO: add ancestor(chain full backup) ID */ elog(INFO, "Backup %s, mode: %s, status: %s. Redundancy: %i/%i, Time Window: %ud/%ud. 
%s", - base36enc(backup->start_time), + backup_id_of(backup), pgBackupGetBackupMode(backup, false), status2str(backup->status), cur_full_backup_num, @@ -451,7 +457,6 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, /* Merging happens here */ for (i = 0; i < parray_num(to_keep_list); i++) { - char *keep_backup_id = NULL; pgBackup *full_backup = NULL; parray *merge_list = NULL; @@ -461,7 +466,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, if (!keep_backup) continue; - elog(INFO, "Consider backup %s for merge", base36enc(keep_backup->start_time)); + elog(INFO, "Consider backup %s for merge", backup_id_of(keep_backup)); /* Got valid incremental backup, find its FULL ancestor */ full_backup = find_parent_full_backup(keep_backup); @@ -469,7 +474,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, /* Failed to find parent */ if (!full_backup) { - elog(WARNING, "Failed to find FULL parent for %s", base36enc(keep_backup->start_time)); + elog(WARNING, "Failed to find FULL parent for %s", backup_id_of(keep_backup)); continue; } @@ -479,7 +484,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, pgBackupCompareIdDesc)) { elog(WARNING, "Skip backup %s for merging, " - "because his FULL parent is not marked for purge", base36enc(keep_backup->start_time)); + "because his FULL parent is not marked for purge", backup_id_of(keep_backup)); continue; } @@ -488,10 +493,9 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, * backups from purge_list. */ - keep_backup_id = base36enc_dup(keep_backup->start_time); elog(INFO, "Merge incremental chain between full backup %s and backup %s", - base36enc(full_backup->start_time), keep_backup_id); - pg_free(keep_backup_id); + backup_id_of(full_backup), + backup_id_of(keep_backup)); merge_list = parray_new(); @@ -533,7 +537,7 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, // if (is_prolific(backup_list, full_backup)) // { // elog(WARNING, "Backup %s has multiple valid descendants. " -// "Automatic merge is not possible.", base36enc(full_backup->start_time)); +// "Automatic merge is not possible.", backup_id_of(full_backup)); // } /* Merge list example: @@ -554,12 +558,17 @@ do_retention_merge(InstanceState *instanceState, parray *backup_list, /* Try to remove merged incremental backup from both keep and purge lists */ parray_rm(to_purge_list, tmp_backup, pgBackupCompareId); - parray_set(to_keep_list, i, NULL); + for (i = 0; i < parray_num(to_keep_list); i++) + if (parray_get(to_keep_list, i) == tmp_backup) + { + parray_set(to_keep_list, i, NULL); + break; + } } if (!no_validate) pgBackupValidate(full_backup, NULL); if (full_backup->status == BACKUP_STATUS_CORRUPT) - elog(ERROR, "Merging of backup %s failed", base36enc(full_backup->start_time)); + elog(ERROR, "Merging of backup %s failed", backup_id_of(full_backup)); /* Cleanup */ parray_free(merge_list); @@ -591,7 +600,7 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) pgBackup *delete_backup = (pgBackup *) parray_get(to_purge_list, j); elog(LOG, "Consider backup %s for purge", - base36enc(delete_backup->start_time)); + backup_id_of(delete_backup)); /* Evaluate marked for delete backup against every backup in keep list. 
* If marked for delete backup is recognized as parent of one of those, @@ -599,8 +608,6 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) */ for (i = 0; i < parray_num(to_keep_list); i++) { - char *keeped_backup_id; - pgBackup *keep_backup = (pgBackup *) parray_get(to_keep_list, i); /* item could have been nullified in merge */ @@ -611,10 +618,9 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) if (keep_backup->backup_mode == BACKUP_MODE_FULL) continue; - keeped_backup_id = base36enc_dup(keep_backup->start_time); - elog(LOG, "Check if backup %s is parent of backup %s", - base36enc(delete_backup->start_time), keeped_backup_id); + backup_id_of(delete_backup), + backup_id_of(keep_backup)); if (is_parent(delete_backup->start_time, keep_backup, true)) { @@ -622,13 +628,12 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) /* We must not delete this backup, evict it from purge list */ elog(LOG, "Retain backup %s because his " "descendant %s is guarded by retention", - base36enc(delete_backup->start_time), keeped_backup_id); + backup_id_of(delete_backup), + backup_id_of(keep_backup)); purge = false; - pg_free(keeped_backup_id); break; } - pg_free(keeped_backup_id); } /* Retain backup */ @@ -640,7 +645,7 @@ do_retention_purge(parray *to_keep_list, parray *to_purge_list) { /* If the backup still is used, do not interrupt and go to the next */ elog(WARNING, "Cannot lock backup %s directory, skip purging", - base36enc(delete_backup->start_time)); + backup_id_of(delete_backup)); continue; } @@ -682,12 +687,11 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) * at least one backup and no file should be removed. * Unless wal-depth is enabled. */ - if ((tlinfo->closest_backup) && instance_config.wal_depth <= 0) + if ((tlinfo->closest_backup) && instance_config.wal_depth == 0) continue; /* WAL retention keeps this timeline from purge */ - if (instance_config.wal_depth >= 0 && tlinfo->anchor_tli > 0 && - tlinfo->anchor_tli != tlinfo->tli) + if (tlinfo->anchor_tli > 0 && tlinfo->anchor_tli != tlinfo->tli) continue; /* @@ -701,7 +705,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) */ if (tlinfo->oldest_backup) { - if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) + if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) { delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); @@ -714,7 +718,7 @@ do_retention_wal(InstanceState *instanceState, bool dry_run) } else { - if (instance_config.wal_depth >= 0 && !(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) + if (!(XLogRecPtrIsInvalid(tlinfo->anchor_lsn))) delete_walfiles_in_tli(instanceState, tlinfo->anchor_lsn, tlinfo, instance_config.xlog_seg_size, dry_run); else @@ -744,7 +748,7 @@ delete_backup_files(pgBackup *backup) if (backup->status == BACKUP_STATUS_DELETED) { elog(WARNING, "Backup %s already deleted", - base36enc(backup->start_time)); + backup_id_of(backup)); return; } @@ -754,7 +758,7 @@ delete_backup_files(pgBackup *backup) time2iso(timestamp, lengthof(timestamp), backup->start_time, false); elog(INFO, "Delete: %s %s", - base36enc(backup->start_time), timestamp); + backup_id_of(backup), timestamp); /* * Update STATUS to BACKUP_STATUS_DELETING in preparation for the case which @@ -942,7 +946,7 @@ delete_walfiles_in_tli(InstanceState *instanceState, XLogRecPtr keep_lsn, timeli join_path_components(wal_fullpath, instanceState->instance_wal_subdir_path, wal_file->file.name); /* save segment from purging */ 
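The do_retention_purge hunks above reduce to a reachability test over the backup chain: a backup marked for purge is retained whenever it is an ancestor of some backup that retention keeps, and only unreferenced candidates are actually deleted. A minimal standalone sketch of that decision follows; it is an illustration only, using hypothetical stand-in names (ToyBackup, toy_is_parent, toy_can_purge) rather than the patch's real pgBackup, parray and is_parent()/lock_backup() machinery.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for pgBackup: only the parent link matters here. */
typedef struct ToyBackup ToyBackup;
struct ToyBackup
{
	const char *id;       /* e.g. a base36-style backup id */
	ToyBackup  *parent;   /* NULL for FULL backups */
};

/* true if "candidate" appears somewhere in the parent chain of "child" */
static bool
toy_is_parent(const ToyBackup *candidate, const ToyBackup *child)
{
	const ToyBackup *p;

	for (p = child->parent; p != NULL; p = p->parent)
		if (p == candidate)
			return true;
	return false;
}

/* May "candidate" be purged, given the backups retention keeps? */
static bool
toy_can_purge(const ToyBackup *candidate, ToyBackup *const *keep, size_t keep_n)
{
	size_t i;

	for (i = 0; i < keep_n; i++)
	{
		if (keep[i] == NULL)            /* slot nullified after a merge */
			continue;
		if (keep[i]->parent == NULL)    /* FULL backups do not pin ancestors */
			continue;
		if (toy_is_parent(candidate, keep[i]))
			return false;               /* ancestor of a kept backup: retain */
	}
	return true;
}

int
main(void)
{
	ToyBackup full_a = {"FULLA", NULL};
	ToyBackup incr_a = {"INCRA", &full_a};   /* kept by retention */
	ToyBackup full_b = {"FULLB", NULL};      /* old chain, nothing kept on it */
	ToyBackup *keep[] = {&incr_a};

	printf("purge FULLA? %s\n", toy_can_purge(&full_a, keep, 1) ? "yes" : "no"); /* no  */
	printf("purge FULLB? %s\n", toy_can_purge(&full_b, keep, 1) ? "yes" : "no"); /* yes */
	return 0;
}

The real do_retention_purge additionally skips any candidate whose backup directory cannot be locked, and, as the hunks above show, now prints backup ids through backup_id_of() instead of pairing base36enc_dup() with pg_free().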
- if (instance_config.wal_depth >= 0 && wal_file->keep) + if (wal_file->keep) { elog(VERBOSE, "Retain WAL segment \"%s\"", wal_fullpath); continue; @@ -1027,7 +1031,7 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, parray *backup_list, *delete_list; const char *pretty_status; int n_deleted = 0, n_found = 0; - size_t size_to_delete = 0; + int64 size_to_delete = 0; char size_to_delete_pretty[20]; pgBackup *backup; @@ -1049,6 +1053,8 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, if (parray_num(backup_list) == 0) { elog(WARNING, "Instance '%s' has no backups", instanceState->instance_name); + parray_free(delete_list); + parray_free(backup_list); return; } @@ -1083,7 +1089,7 @@ do_delete_status(InstanceState *instanceState, InstanceConfig *instance_config, backup = (pgBackup *)parray_get(delete_list, i); elog(INFO, "Backup %s with status %s %s be deleted", - base36enc(backup->start_time), status2str(backup->status), dry_run ? "can" : "will"); + backup_id_of(backup), status2str(backup->status), dry_run ? "can" : "will"); size_to_delete += backup->data_bytes; if (backup->stream) diff --git a/src/dir.c b/src/dir.c index 4ebe0939b..4b1bc2816 100644 --- a/src/dir.c +++ b/src/dir.c @@ -3,11 +3,12 @@ * dir.c: directory operation utility. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ +#include #include "pg_probackup.h" #include "utils/file.h" @@ -130,6 +131,9 @@ static void opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, const char *type); static void cleanup_tablespace(const char *path); +static void control_string_bad_format(const char* str); + + /* Tablespace mapping */ static TablespaceList tablespace_dirs = {NULL, NULL}; /* Extra directories mapping */ @@ -147,11 +151,11 @@ dir_create_dir(const char *dir, mode_t mode, bool strict) { char parent[MAXPGPATH]; - strncpy(parent, dir, MAXPGPATH); + strlcpy(parent, dir, MAXPGPATH); get_parent_directory(parent); /* Create parent first */ - if (access(parent, F_OK) == -1) + if (strlen(parent) > 0 && access(parent, F_OK) == -1) dir_create_dir(parent, mode, false); /* Create directory */ @@ -178,7 +182,7 @@ pgFileNew(const char *path, const char *rel_path, bool follow_symlink, /* file not found is not an error case */ if (errno == ENOENT) return NULL; - elog(ERROR, "cannot stat file \"%s\": %s", path, + elog(ERROR, "Cannot stat file \"%s\": %s", path, strerror(errno)); } @@ -258,137 +262,6 @@ pgFileDelete(mode_t mode, const char *full_path) } } -/* - * Read the local file to compute its CRC. - * We cannot make decision about file decompression because - * user may ask to backup already compressed files and we should be - * obvious about it. 
- */ -pg_crc32 -pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) -{ - FILE *fp; - pg_crc32 crc = 0; - char *buf; - size_t len = 0; - - INIT_FILE_CRC32(use_crc32c, crc); - - /* open file in binary read mode */ - fp = fopen(file_path, PG_BINARY_R); - if (fp == NULL) - { - if (errno == ENOENT) - { - if (missing_ok) - { - FIN_FILE_CRC32(use_crc32c, crc); - return crc; - } - } - - elog(ERROR, "Cannot open file \"%s\": %s", - file_path, strerror(errno)); - } - - /* disable stdio buffering */ - setvbuf(fp, NULL, _IONBF, BUFSIZ); - buf = pgut_malloc(STDIO_BUFSIZE); - - /* calc CRC of file */ - for (;;) - { - if (interrupted) - elog(ERROR, "interrupted during CRC calculation"); - - len = fread(buf, 1, STDIO_BUFSIZE, fp); - - if (ferror(fp)) - elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); - - /* update CRC */ - COMP_FILE_CRC32(use_crc32c, crc, buf, len); - - if (feof(fp)) - break; - } - - FIN_FILE_CRC32(use_crc32c, crc); - fclose(fp); - pg_free(buf); - - return crc; -} - -/* - * Read the local file to compute its CRC. - * We cannot make decision about file decompression because - * user may ask to backup already compressed files and we should be - * obvious about it. - */ -pg_crc32 -pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) -{ - gzFile fp; - pg_crc32 crc = 0; - int len = 0; - int err; - char *buf; - - INIT_FILE_CRC32(use_crc32c, crc); - - /* open file in binary read mode */ - fp = gzopen(file_path, PG_BINARY_R); - if (fp == NULL) - { - if (errno == ENOENT) - { - if (missing_ok) - { - FIN_FILE_CRC32(use_crc32c, crc); - return crc; - } - } - - elog(ERROR, "Cannot open file \"%s\": %s", - file_path, strerror(errno)); - } - - buf = pgut_malloc(STDIO_BUFSIZE); - - /* calc CRC of file */ - for (;;) - { - if (interrupted) - elog(ERROR, "interrupted during CRC calculation"); - - len = gzread(fp, buf, STDIO_BUFSIZE); - - if (len <= 0) - { - /* we either run into eof or error */ - if (gzeof(fp)) - break; - else - { - const char *err_str = NULL; - - err_str = gzerror(fp, &err); - elog(ERROR, "Cannot read from compressed file %s", err_str); - } - } - - /* update CRC */ - COMP_FILE_CRC32(use_crc32c, crc, buf, len); - } - - FIN_FILE_CRC32(use_crc32c, crc); - gzclose(fp); - pg_free(buf); - - return crc; -} - void pgFileFree(void *file) { @@ -636,7 +509,7 @@ dir_check_file(pgFile *file, bool backup_logs) pgdata_exclude_files_non_exclusive[i]) == 0) { /* Skip */ - elog(VERBOSE, "Excluding file: %s", file->name); + elog(LOG, "Excluding file: %s", file->name); return CHECK_FALSE; } } @@ -645,7 +518,7 @@ dir_check_file(pgFile *file, bool backup_logs) if (strcmp(file->rel_path, pgdata_exclude_files[i]) == 0) { /* Skip */ - elog(VERBOSE, "Excluding file: %s", file->name); + elog(LOG, "Excluding file: %s", file->name); return CHECK_FALSE; } } @@ -665,7 +538,7 @@ dir_check_file(pgFile *file, bool backup_logs) /* exclude by dirname */ if (strcmp(file->name, pgdata_exclude_dir[i]) == 0) { - elog(VERBOSE, "Excluding directory content: %s", file->rel_path); + elog(LOG, "Excluding directory content: %s", file->rel_path); return CHECK_EXCLUDE_FALSE; } } @@ -675,7 +548,7 @@ dir_check_file(pgFile *file, bool backup_logs) if (strcmp(file->rel_path, PG_LOG_DIR) == 0) { /* Skip */ - elog(VERBOSE, "Excluding directory content: %s", file->rel_path); + elog(LOG, "Excluding directory content: %s", file->rel_path); return CHECK_EXCLUDE_FALSE; } } @@ -754,59 +627,10 @@ dir_check_file(pgFile *file, bool backup_logs) return CHECK_FALSE; else if (isdigit(file->name[0])) { 
- char *fork_name; - int len; - char suffix[MAXPGPATH]; - - fork_name = strstr(file->name, "_"); - if (fork_name) - { - /* Auxiliary fork of the relfile */ - if (strcmp(fork_name, "_vm") == 0) - file->forkName = vm; + set_forkname(file); - else if (strcmp(fork_name, "_fsm") == 0) - file->forkName = fsm; - - else if (strcmp(fork_name, "_cfm") == 0) - file->forkName = cfm; - - else if (strcmp(fork_name, "_ptrack") == 0) - file->forkName = ptrack; - - else if (strcmp(fork_name, "_init") == 0) - file->forkName = init; - - // extract relOid for certain forks - if (file->forkName == vm || - file->forkName == fsm || - file->forkName == init || - file->forkName == cfm) - { - // sanity - if (sscanf(file->name, "%u_*", &(file->relOid)) != 1) - file->relOid = 0; - } - - /* Do not backup ptrack files */ - if (file->forkName == ptrack) - return CHECK_FALSE; - } - else - { - - len = strlen(file->name); - /* reloid.cfm */ - if (len > 3 && strcmp(file->name + len - 3, "cfm") == 0) - return CHECK_TRUE; - - sscanf_res = sscanf(file->name, "%u.%d.%s", &(file->relOid), - &(file->segno), suffix); - if (sscanf_res == 0) - elog(ERROR, "Cannot parse file name \"%s\"", file->name); - else if (sscanf_res == 1 || sscanf_res == 2) - file->is_datafile = true; - } + if (file->forkName == ptrack) /* Compatibility with left-overs from ptrack1 */ + return CHECK_FALSE; } } @@ -963,14 +787,14 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, for (arg_ptr = arg; *arg_ptr; arg_ptr++) { if (dst_ptr - dst >= MAXPGPATH) - elog(ERROR, "directory name too long"); + elog(ERROR, "Directory name too long"); if (*arg_ptr == '\\' && *(arg_ptr + 1) == '=') ; /* skip backslash escaping = */ else if (*arg_ptr == '=' && (arg_ptr == arg || *(arg_ptr - 1) != '\\')) { if (*cell->new_dir) - elog(ERROR, "multiple \"=\" signs in %s mapping\n", type); + elog(ERROR, "Multiple \"=\" signs in %s mapping\n", type); else dst = dst_ptr = cell->new_dir; } @@ -979,7 +803,7 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, } if (!*cell->old_dir || !*cell->new_dir) - elog(ERROR, "invalid %s mapping format \"%s\", " + elog(ERROR, "Invalid %s mapping format \"%s\", " "must be \"OLDDIR=NEWDIR\"", type, arg); canonicalize_path(cell->old_dir); canonicalize_path(cell->new_dir); @@ -991,11 +815,11 @@ opt_path_map(ConfigOption *opt, const char *arg, TablespaceList *list, * consistent with the new_dir check. 
*/ if (!is_absolute_path(cell->old_dir)) - elog(ERROR, "old directory is not an absolute path in %s mapping: %s\n", + elog(ERROR, "Old directory is not an absolute path in %s mapping: %s\n", type, cell->old_dir); if (!is_absolute_path(cell->new_dir)) - elog(ERROR, "new directory is not an absolute path in %s mapping: %s\n", + elog(ERROR, "New directory is not an absolute path in %s mapping: %s\n", type, cell->new_dir); if (list->tail) @@ -1036,13 +860,20 @@ opt_externaldir_map(ConfigOption *opt, const char *arg) */ void create_data_directories(parray *dest_files, const char *data_dir, const char *backup_dir, - bool extract_tablespaces, bool incremental, fio_location location) + bool extract_tablespaces, bool incremental, fio_location location, + const char* waldir_path) { int i; parray *links = NULL; mode_t pg_tablespace_mode = DIR_PERMISSION; char to_path[MAXPGPATH]; + if (waldir_path && !dir_is_empty(waldir_path, location)) + { + elog(ERROR, "WAL directory location is not empty: \"%s\"", waldir_path); + } + + /* get tablespace map */ if (extract_tablespaces) { @@ -1107,12 +938,33 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba /* skip external directory content */ if (dir->external_dir_num != 0) continue; + /* Create WAL directory and symlink if waldir_path is setting */ + if (waldir_path && strcmp(dir->rel_path, PG_XLOG_DIR) == 0) { + /* get full path to PG_XLOG_DIR */ + + join_path_components(to_path, data_dir, PG_XLOG_DIR); + + elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + waldir_path, to_path); + + /* create tablespace directory from waldir_path*/ + fio_mkdir(waldir_path, pg_tablespace_mode, location); + + /* create link to linked_path */ + if (fio_symlink(waldir_path, to_path, incremental, location) < 0) + elog(ERROR, "Could not create symbolic link \"%s\": %s", + to_path, strerror(errno)); + + continue; + + + } /* tablespace_map exists */ if (links) { /* get parent dir of rel_path */ - strncpy(parent_dir, dir->rel_path, MAXPGPATH); + strlcpy(parent_dir, dir->rel_path, MAXPGPATH); get_parent_directory(parent_dir); /* check if directory is actually link to tablespace */ @@ -1134,7 +986,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba join_path_components(to_path, data_dir, dir->rel_path); - elog(VERBOSE, "Create directory \"%s\" and symbolic link \"%s\"", + elog(LOG, "Create directory \"%s\" and symbolic link \"%s\"", linked_path, to_path); /* create tablespace directory */ @@ -1151,7 +1003,7 @@ create_data_directories(parray *dest_files, const char *data_dir, const char *ba } /* This is not symlink, create directory */ - elog(VERBOSE, "Create directory \"%s\"", dir->rel_path); + elog(LOG, "Create directory \"%s\"", dir->rel_path); join_path_components(to_path, data_dir, dir->rel_path); @@ -1194,7 +1046,7 @@ read_tablespace_map(parray *links, const char *backup_dir) int i = 0; if (sscanf(buf, "%s %n", link_name, &n) != 1) - elog(ERROR, "invalid format found in \"%s\"", map_path); + elog(ERROR, "Invalid format found in \"%s\"", map_path); path = buf + n; @@ -1249,7 +1101,7 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg bool tblspaces_are_empty = true; elog(LOG, "Checking tablespace directories of backup %s", - base36enc(backup->start_time)); + backup_id_of(backup)); /* validate tablespace map, * if there are no tablespaces, then there is nothing left to do @@ -1263,7 +1115,7 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool 
pg */ if (tablespace_dirs.head != NULL) elog(ERROR, "Backup %s has no tablespaceses, nothing to remap " - "via \"--tablespace-mapping\" option", base36enc(backup->backup_id)); + "via \"--tablespace-mapping\" option", backup_id_of(backup)); return NoTblspc; } @@ -1323,7 +1175,6 @@ check_tablespace_mapping(pgBackup *backup, bool incremental, bool force, bool pg { pgFile *link = (pgFile *) parray_get(links, i); const char *linked_path = link->linked; - TablespaceListCell *cell; bool remapped = false; for (cell = tablespace_dirs.head; cell; cell = cell->next) @@ -1398,7 +1249,7 @@ check_external_dir_mapping(pgBackup *backup, bool incremental) int i; elog(LOG, "check external directories of backup %s", - base36enc(backup->start_time)); + backup_id_of(backup)); if (!backup->external_dir_str) { @@ -1467,7 +1318,7 @@ get_external_remap(char *current_dir) return current_dir; } -/* Parsing states for get_control_value() */ +/* Parsing states for get_control_value_str() */ #define CONTROL_WAIT_NAME 1 #define CONTROL_INNAME 2 #define CONTROL_WAIT_COLON 3 @@ -1481,26 +1332,62 @@ get_external_remap(char *current_dir) * The line has the following format: * {"name1":"value1", "name2":"value2"} * - * The value will be returned to "value_str" as string if it is not NULL. If it - * is NULL the value will be returned to "value_int64" as int64. + * The value will be returned in "value_int64" as int64. + * + * Returns true if the value was found in the line and parsed. + */ +bool +get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory) +{ + + char buf_int64[32]; + + assert(value_int64); + + /* Set default value */ + *value_int64 = 0; + + if (!get_control_value_str(str, name, buf_int64, sizeof(buf_int64), is_mandatory)) + return false; + + if (!parse_int64(buf_int64, value_int64, 0)) + { + /* We assume that too big value is -1 */ + if (errno == ERANGE) + *value_int64 = BYTES_INVALID; + else + control_string_bad_format(str); + return false; + } + + return true; +} + +/* + * Get value from json-like line "str" of backup_content.control file. + * + * The line has the following format: + * {"name1":"value1", "name2":"value2"} + * + * The value will be returned to "value_str" as string. * * Returns true if the value was found in the line. */ + bool -get_control_value(const char *str, const char *name, - char *value_str, int64 *value_int64, bool is_mandatory) +get_control_value_str(const char *str, const char *name, + char *value_str, size_t value_str_size, bool is_mandatory) { int state = CONTROL_WAIT_NAME; char *name_ptr = (char *) name; char *buf = (char *) str; - char buf_int64[32], /* Buffer for "value_int64" */ - *buf_int64_ptr = buf_int64; + char *const value_str_start = value_str; - /* Set default values */ - if (value_str) - *value_str = '\0'; - else if (value_int64) - *value_int64 = 0; + assert(value_str); + assert(value_str_size > 0); + + /* Set default value */ + *value_str = '\0'; while (*buf) { @@ -1510,7 +1397,7 @@ get_control_value(const char *str, const char *name, if (*buf == '"') state = CONTROL_INNAME; else if (IsAlpha(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_INNAME: /* Found target field. Parse value. 
*/ @@ -1529,57 +1416,32 @@ get_control_value(const char *str, const char *name, if (*buf == ':') state = CONTROL_WAIT_VALUE; else if (!IsSpace(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_WAIT_VALUE: if (*buf == '"') { state = CONTROL_INVALUE; - buf_int64_ptr = buf_int64; } else if (IsAlpha(*buf)) - goto bad_format; + control_string_bad_format(str); break; case CONTROL_INVALUE: /* Value was parsed, exit */ if (*buf == '"') { - if (value_str) - { - *value_str = '\0'; - } - else if (value_int64) - { - /* Length of buf_uint64 should not be greater than 31 */ - if (buf_int64_ptr - buf_int64 >= 32) - elog(ERROR, "field \"%s\" is out of range in the line %s of the file %s", - name, str, DATABASE_FILE_LIST); - - *buf_int64_ptr = '\0'; - if (!parse_int64(buf_int64, value_int64, 0)) - { - /* We assume that too big value is -1 */ - if (errno == ERANGE) - *value_int64 = BYTES_INVALID; - else - goto bad_format; - } - } - + *value_str = '\0'; return true; } else { - if (value_str) - { - *value_str = *buf; - value_str++; - } - else - { - *buf_int64_ptr = *buf; - buf_int64_ptr++; + /* verify if value_str not exceeds value_str_size limits */ + if (value_str - value_str_start >= value_str_size - 1) { + elog(ERROR, "Field \"%s\" is out of range in the line %s of the file %s", + name, str, DATABASE_FILE_LIST); } + *value_str = *buf; + value_str++; } break; case CONTROL_WAIT_NEXT_NAME: @@ -1596,18 +1458,20 @@ get_control_value(const char *str, const char *name, /* There is no close quotes */ if (state == CONTROL_INNAME || state == CONTROL_INVALUE) - goto bad_format; + control_string_bad_format(str); /* Did not find target field */ if (is_mandatory) - elog(ERROR, "field \"%s\" is not found in the line %s of the file %s", + elog(ERROR, "Field \"%s\" is not found in the line %s of the file %s", name, str, DATABASE_FILE_LIST); return false; +} -bad_format: - elog(ERROR, "%s file has invalid format in line %s", - DATABASE_FILE_LIST, str); - return false; /* Make compiler happy */ +static void +control_string_bad_format(const char* str) +{ + elog(ERROR, "%s file has invalid format in line %s", + DATABASE_FILE_LIST, str); } /* @@ -1625,7 +1489,7 @@ dir_is_empty(const char *path, fio_location location) /* Directory in path doesn't exist */ if (errno == ENOENT) return true; - elog(ERROR, "cannot open directory \"%s\": %s", path, strerror(errno)); + elog(ERROR, "Cannot open directory \"%s\": %s", path, strerror(errno)); } errno = 0; @@ -1641,7 +1505,7 @@ dir_is_empty(const char *path, fio_location location) return false; } if (errno) - elog(ERROR, "cannot read directory \"%s\": %s", path, strerror(errno)); + elog(ERROR, "Cannot read directory \"%s\": %s", path, strerror(errno)); fio_closedir(dir); @@ -1802,7 +1666,7 @@ write_database_map(pgBackup *backup, parray *database_map, parray *backup_files_ FIO_BACKUP_HOST); file->crc = pgFileGetCRC(database_map_path, true, false); file->write_size = file->size; - file->uncompressed_size = file->read_size; + file->uncompressed_size = file->size; parray_append(backup_files_list, file); } @@ -1841,8 +1705,8 @@ read_database_map(pgBackup *backup) db_map_entry *db_entry = (db_map_entry *) pgut_malloc(sizeof(db_map_entry)); - get_control_value(buf, "dbOid", NULL, &dbOid, true); - get_control_value(buf, "datname", datname, NULL, true); + get_control_value_int64(buf, "dbOid", &dbOid, true); + get_control_value_str(buf, "datname", datname, sizeof(datname), true); db_entry->dbOid = dbOid; db_entry->datname = pgut_strdup(datname); @@ -1889,7 +1753,7 @@ 
cleanup_tablespace(const char *path) join_path_components(fullpath, path, file->rel_path); fio_delete(file->mode, fullpath, FIO_DB_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + elog(LOG, "Deleted file \"%s\"", fullpath); } parray_walk(files, pgFileFree); @@ -1909,3 +1773,98 @@ pfilearray_clear_locks(parray *file_list) pg_atomic_clear_flag(&file->lock); } } + +static inline bool +is_forkname(char *name, size_t *pos, const char *forkname) +{ + size_t fnlen = strlen(forkname); + if (strncmp(name + *pos, forkname, fnlen) != 0) + return false; + *pos += fnlen; + return true; +} + +#define OIDCHARS 10 +#define MAXSEGNO (((uint64_t)1<<32)/RELSEG_SIZE-1) +#define SEGNOCHARS 5 /* when BLCKSZ == (1<<15) */ + +/* Set forkName if possible */ +bool +set_forkname(pgFile *file) +{ + size_t i = 0; + uint64_t oid = 0; /* use 64bit to not check for overflow in a loop */ + uint64_t segno = 0; + + /* pretend it is not relation file */ + file->relOid = 0; + file->forkName = none; + file->is_datafile = false; + + for (i = 0; isdigit(file->name[i]); i++) + { + if (i == 0 && file->name[i] == '0') + return false; + oid = oid * 10 + file->name[i] - '0'; + } + if (i == 0 || i > OIDCHARS || oid > UINT32_MAX) + return false; + + /* usual fork name */ + /* /^\d+_(vm|fsm|init|ptrack)$/ */ + if (is_forkname(file->name, &i, "_vm")) + file->forkName = vm; + else if (is_forkname(file->name, &i, "_fsm")) + file->forkName = fsm; + else if (is_forkname(file->name, &i, "_init")) + file->forkName = init; + else if (is_forkname(file->name, &i, "_ptrack")) + file->forkName = ptrack; + + /* segment number */ + /* /^\d+(_(vm|fsm|init|ptrack))?\.\d+$/ */ + if (file->name[i] == '.' && isdigit(file->name[i+1])) + { + size_t start = i+1; + for (i++; isdigit(file->name[i]); i++) + { + if (i == start && file->name[i] == '0') + return false; + segno = segno * 10 + file->name[i] - '0'; + } + if (i - start > SEGNOCHARS || segno > MAXSEGNO) + return false; + } + + /* CFS family fork names */ + if (file->forkName == none && + is_forkname(file->name, &i, ".cfm.bck")) + { + /* /^\d+(\.\d+)?\.cfm\.bck$/ */ + file->forkName = cfm_bck; + } + if (file->forkName == none && + is_forkname(file->name, &i, ".bck")) + { + /* /^\d+(\.\d+)?\.bck$/ */ + file->forkName = cfs_bck; + } + if (file->forkName == none && + is_forkname(file->name, &i, ".cfm")) + { + /* /^\d+(\.\d+)?.cfm$/ */ + file->forkName = cfm; + } + + /* If there are excess characters, it is not relation file */ + if (file->name[i] != 0) + { + file->forkName = none; + return false; + } + + file->relOid = oid; + file->segno = segno; + file->is_datafile = file->forkName == none; + return true; +} \ No newline at end of file diff --git a/src/fetch.c b/src/fetch.c index bef30dac6..5401d815e 100644 --- a/src/fetch.c +++ b/src/fetch.c @@ -92,7 +92,7 @@ fetchFile(PGconn *conn, const char *filename, size_t *filesize) /* sanity check the result set */ if (PQntuples(res) != 1 || PQgetisnull(res, 0, 0)) - elog(ERROR, "unexpected result set while fetching remote file \"%s\"", + elog(ERROR, "Unexpected result set while fetching remote file \"%s\"", filename); /* Read result to local variables */ diff --git a/src/help.c b/src/help.c index a494ab209..e18706a13 100644 --- a/src/help.c +++ b/src/help.c @@ -87,13 +87,14 @@ help_pg_probackup(void) printf(_("\n %s version\n"), PROGRAM_NAME); - printf(_("\n %s init -B backup-path\n"), PROGRAM_NAME); + printf(_("\n %s init -B backup-dir\n"), PROGRAM_NAME); - printf(_("\n %s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + 
printf(_("\n %s set-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path]\n")); printf(_(" [--external-dirs=external-directories-paths]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -113,16 +114,17 @@ help_pg_probackup(void) printf(_(" [--archive-port=port] [--archive-user=username]\n")); printf(_(" [--help]\n")); - printf(_("\n %s set-backup -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s set-backup -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [--ttl=interval] [--expire-time=timestamp]\n")); printf(_(" [--note=text]\n")); printf(_(" [--help]\n")); - printf(_("\n %s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s show-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [--format=format]\n")); + printf(_(" [--no-scale-units]\n")); printf(_(" [--help]\n")); - printf(_("\n %s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s backup -B backup-dir -b backup-mode --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-C]\n")); printf(_(" [--stream [-S slot-name] [--temp-slot]]\n")); printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n")); @@ -131,6 +133,8 @@ help_pg_probackup(void) printf(_(" [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-console=log-format-console]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -153,7 +157,7 @@ help_pg_probackup(void) printf(_(" [--help]\n")); - printf(_("\n %s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s restore -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); @@ -169,8 +173,10 @@ help_pg_probackup(void) printf(_(" [-T OLDDIR=NEWDIR] [--progress]\n")); printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n")); printf(_(" [--skip-external-dirs] [--no-sync]\n")); + printf(_(" [-X WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include | --db-exclude]\n")); + printf(_(" [--destroy-all-other-dbs]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); @@ -178,7 +184,7 @@ help_pg_probackup(void) printf(_(" [--archive-port=port] [--archive-user=username]\n")); printf(_(" [--help]\n")); - printf(_("\n %s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n %s validate -B backup-dir [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-i backup-id] [--progress] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn 
[--recovery-target-inclusive=boolean]]\n")); @@ -187,18 +193,18 @@ help_pg_probackup(void) printf(_(" [--skip-block-validation]\n")); printf(_(" [--help]\n")); - printf(_("\n %s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n %s checkdb [-B backup-dir] [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [--progress] [-j num-threads]\n")); printf(_(" [--amcheck] [--skip-block-validation]\n")); printf(_(" [--heapallindexed] [--checkunique]\n")); printf(_(" [--help]\n")); - printf(_("\n %s show -B backup-path\n"), PROGRAM_NAME); - printf(_(" [--instance=instance_name [-i backup-id]]\n")); + printf(_("\n %s show -B backup-dir\n"), PROGRAM_NAME); + printf(_(" [--instance=instance-name [-i backup-id]]\n")); printf(_(" [--format=format] [--archive]\n")); printf(_(" [--no-color] [--help]\n")); - printf(_("\n %s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s delete -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-j num-threads] [--progress]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); printf(_(" [--retention-window=retention-window]\n")); @@ -208,24 +214,24 @@ help_pg_probackup(void) printf(_(" [--dry-run] [--no-validate] [--no-sync]\n")); printf(_(" [--help]\n")); - printf(_("\n %s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s merge -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [--progress] [-j num-threads]\n")); printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--help]\n")); - printf(_("\n %s add-instance -B backup-path -D pgdata-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n %s add-instance -B backup-dir -D pgdata-path\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [--external-dirs=external-directories-paths]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); printf(_(" [--help]\n")); - printf(_("\n %s del-instance -B backup-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n %s del-instance -B backup-dir\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [--help]\n")); - printf(_("\n %s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s archive-push -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -239,7 +245,7 @@ help_pg_probackup(void) printf(_(" [--ssh-options]\n")); printf(_(" [--help]\n")); - printf(_("\n %s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n %s archive-get -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-path=wal-file-path\n")); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -261,15 +267,16 @@ help_pg_probackup(void) printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); + printf(_(" [--dry-run]\n")); printf(_(" [--help]\n")); if ((PROGRAM_URL || PROGRAM_EMAIL)) { printf("\n"); if (PROGRAM_URL) - printf("Read the website for details. 
<%s>\n", PROGRAM_URL); + printf(_("Read the website for details <%s>.\n"), PROGRAM_URL); if (PROGRAM_EMAIL) - printf("Report bugs to <%s>.\n", PROGRAM_EMAIL); + printf(_("Report bugs to <%s>.\n"), PROGRAM_EMAIL); } } @@ -288,14 +295,14 @@ help_internal(void) static void help_init(void) { - printf(_("\n%s init -B backup-path\n\n"), PROGRAM_NAME); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n\n")); + printf(_("\n%s init -B backup-dir\n\n"), PROGRAM_NAME); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n\n")); } static void help_backup(void) { - printf(_("\n%s backup -B backup-path -b backup-mode --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s backup -B backup-dir -b backup-mode --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-C]\n")); printf(_(" [--stream [-S slot-name] [--temp-slot]]\n")); printf(_(" [--backup-pg-log] [-j num-threads] [--progress]\n")); @@ -304,6 +311,8 @@ help_backup(void) printf(_(" [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-console=log-format-console]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -324,9 +333,9 @@ help_backup(void) printf(_(" [--ssh-options]\n")); printf(_(" [--ttl=interval] [--expire-time=timestamp] [--note=text]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); printf(_(" -b, --backup-mode=backup-mode backup mode=FULL|PAGE|DELTA|PTRACK\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" -C, --smooth-checkpoint do smooth checkpoint before backup\n")); printf(_(" --stream stream the transaction log and include it in the backup\n")); @@ -351,6 +360,12 @@ help_backup(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -427,15 +442,17 @@ help_backup(void) static void help_restore(void) { - printf(_("\n%s restore -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s restore -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-i backup-id] [-j num-threads]\n")); printf(_(" [--progress] [--force] [--no-sync]\n")); printf(_(" [--no-validate] [--skip-block-validation]\n")); printf(_(" [-T OLDDIR=NEWDIR]\n")); printf(_(" [--external-mapping=OLDDIR=NEWDIR]\n")); printf(_(" [--skip-external-dirs]\n")); + printf(_(" [-X 
WALDIR | --waldir=WALDIR]\n")); printf(_(" [-I | --incremental-mode=none|checksum|lsn]\n")); printf(_(" [--db-include dbname | --db-exclude dbname]\n")); + printf(_(" [--destroy-all-other-dbs]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); printf(_(" [--recovery-target-timeline=timeline]\n")); @@ -452,8 +469,8 @@ help_restore(void) printf(_(" [--archive-host=hostname] [--archive-port=port]\n")); printf(_(" [--archive-user=username]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" -i, --backup-id=backup-id backup to restore\n")); @@ -471,6 +488,10 @@ help_restore(void) printf(_(" relocate the external directory from OLDDIR to NEWDIR\n")); printf(_(" --skip-external-dirs do not restore all external directories\n")); + + printf(_(" -X, --waldir=WALDIR location for the write-ahead log directory\n")); + + printf(_("\n Incremental restore options:\n")); printf(_(" -I, --incremental-mode=none|checksum|lsn\n")); printf(_(" reuse valid pages available in PGDATA if they have not changed\n")); @@ -479,6 +500,9 @@ help_restore(void) printf(_("\n Partial restore options:\n")); printf(_(" --db-include dbname restore only specified databases\n")); printf(_(" --db-exclude dbname do not restore specified databases\n")); + printf(_(" --destroy-all-other-dbs\n")); + printf(_(" allows to do partial restore that is prohibited by default,\n")); + printf(_(" because it might remove all other databases.\n")); printf(_("\n Recovery options:\n")); printf(_(" --recovery-target-time=time time stamp up to which recovery will proceed\n")); @@ -512,6 +536,12 @@ help_restore(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -547,7 +577,7 @@ help_restore(void) static void help_validate(void) { - printf(_("\n%s validate -B backup-path [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n%s validate -B backup-dir [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-i backup-id] [--progress] [-j num-threads]\n")); printf(_(" [--recovery-target-time=time|--recovery-target-xid=xid\n")); printf(_(" |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]]\n")); @@ -555,8 +585,8 @@ help_validate(void) printf(_(" [--recovery-target-name=target-name]\n")); printf(_(" [--skip-block-validation]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the 
instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to validate\n")); printf(_(" --progress show progress\n")); @@ -579,6 +609,12 @@ help_validate(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -598,13 +634,13 @@ help_validate(void) static void help_checkdb(void) { - printf(_("\n%s checkdb [-B backup-path] [--instance=instance_name]\n"), PROGRAM_NAME); + printf(_("\n%s checkdb [-B backup-dir] [--instance=instance-name]\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path] [-j num-threads] [--progress]\n")); printf(_(" [--amcheck] [--skip-block-validation]\n")); printf(_(" [--heapallindexed] [--checkunique]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" --progress show progress\n")); @@ -626,6 +662,12 @@ help_checkdb(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log\n")); @@ -653,12 +695,12 @@ help_checkdb(void) static void help_show(void) { - printf(_("\n%s show -B backup-path\n"), PROGRAM_NAME); - printf(_(" [--instance=instance_name [-i backup-id]]\n")); + printf(_("\n%s show -B backup-dir\n"), PROGRAM_NAME); + printf(_(" [--instance=instance-name [-i backup-id]]\n")); printf(_(" [--format=format] [--archive]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name show info about specific instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name show info about specific instance\n")); printf(_(" -i, --backup-id=backup-id show info about specific backups\n")); printf(_(" --archive show WAL archive information\n")); printf(_(" 
--format=format show format=PLAIN|JSON\n")); @@ -668,7 +710,7 @@ help_show(void) static void help_delete(void) { - printf(_("\n%s delete -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s delete -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-i backup-id | --delete-expired | --merge-expired] [--delete-wal]\n")); printf(_(" [-j num-threads] [--progress]\n")); printf(_(" [--retention-redundancy=retention-redundancy]\n")); @@ -676,8 +718,8 @@ help_delete(void) printf(_(" [--wal-depth=wal-depth]\n")); printf(_(" [--no-validate] [--no-sync]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to delete\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); printf(_(" --progress show progress\n")); @@ -706,6 +748,12 @@ help_delete(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -725,19 +773,21 @@ help_delete(void) static void help_merge(void) { - printf(_("\n%s merge -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s merge -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id [-j num-threads] [--progress]\n")); printf(_(" [--no-validate] [--no-sync]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-console=log-format-console]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); printf(_(" [--log-rotation-size=log-rotation-size]\n")); printf(_(" [--log-rotation-age=log-rotation-age]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -i, --backup-id=backup-id backup to merge\n")); printf(_(" -j, --threads=NUM number of parallel threads\n")); @@ -752,6 +802,12 @@ help_merge(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-console=log-format-console\n")); + printf(_(" defines the format of the console log (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" 
--log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -771,7 +827,7 @@ help_merge(void) static void help_set_backup(void) { - printf(_("\n%s set-backup -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s set-backup -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" -i backup-id\n")); printf(_(" [--ttl=interval] [--expire-time=time] [--note=text]\n\n")); @@ -787,12 +843,13 @@ help_set_backup(void) static void help_set_config(void) { - printf(_("\n%s set-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s set-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [-D pgdata-path]\n")); printf(_(" [-E external-directories-paths]\n")); printf(_(" [--restore-command=cmdline]\n")); printf(_(" [--log-level-console=log-level-console]\n")); printf(_(" [--log-level-file=log-level-file]\n")); + printf(_(" [--log-format-file=log-format-file]\n")); printf(_(" [--log-filename=log-filename]\n")); printf(_(" [--error-log-filename=error-log-filename]\n")); printf(_(" [--log-directory=log-directory]\n")); @@ -809,8 +866,8 @@ help_set_config(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); printf(_(" -E --external-dirs=external-directories-paths\n")); printf(_(" backup some directories not from pgdata \n")); @@ -824,6 +881,9 @@ help_set_config(void) printf(_(" --log-level-file=log-level-file\n")); printf(_(" level for file logging (default: off)\n")); printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); printf(_(" --log-filename=log-filename\n")); printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); @@ -889,27 +949,28 @@ help_set_config(void) static void help_show_config(void) { - printf(_("\n%s show-config -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s show-config -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" [--format=format]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance\n")); - printf(_(" --format=format show format=PLAIN|JSON\n\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance\n")); + printf(_(" --format=format show format=PLAIN|JSON\n")); + printf(_(" --no-scale-units show memory and time values in default units\n\n")); } static void help_add_instance(void) { - printf(_("\n%s add-instance -B 
backup-path -D pgdata-path\n"), PROGRAM_NAME); - printf(_(" --instance=instance_name\n")); + printf(_("\n%s add-instance -B backup-dir -D pgdata-path\n"), PROGRAM_NAME); + printf(_(" --instance=instance-name\n")); printf(_(" [-E external-directory-path]\n")); printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); printf(_(" -D, --pgdata=pgdata-path location of the database storage area\n")); - printf(_(" --instance=instance_name name of the new instance\n")); + printf(_(" --instance=instance-name name of the new instance\n")); printf(_(" -E --external-dirs=external-directories-paths\n")); printf(_(" backup some directories not from pgdata \n")); @@ -924,21 +985,45 @@ help_add_instance(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); + + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); } static void help_del_instance(void) { - printf(_("\n%s del-instance -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s del-instance -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance to delete\n\n")); } static void help_archive_push(void) { - printf(_("\n%s archive-push -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s archive-push -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); 
printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -951,8 +1036,8 @@ help_archive_push(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance to delete\n")); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" name of the file to copy into WAL archive\n")); printf(_(" --wal-file-path=wal-file-path\n")); @@ -971,6 +1056,30 @@ help_archive_push(void) printf(_(" --compress-level=compress-level\n")); printf(_(" level of compression [0-9] (default: 1)\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); @@ -986,7 +1095,7 @@ help_archive_push(void) static void help_archive_get(void) { - printf(_("\n%s archive-get -B backup-path --instance=instance_name\n"), PROGRAM_NAME); + printf(_("\n%s archive-get -B backup-dir --instance=instance-name\n"), PROGRAM_NAME); printf(_(" --wal-file-name=wal-file-name\n")); printf(_(" [--wal-file-path=wal-file-path]\n")); printf(_(" [-j num-threads] [--batch-size=batch_size]\n")); @@ -995,8 +1104,8 @@ help_archive_get(void) printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n\n")); - printf(_(" -B, --backup-path=backup-path location of the backup storage area\n")); - printf(_(" --instance=instance_name name of the instance to delete\n")); + printf(_(" -B, --backup-path=backup-dir location of the backup storage area\n")); + printf(_(" --instance=instance-name name of the instance to delete\n")); printf(_(" --wal-file-path=wal-file-path\n")); printf(_(" relative destination path name of the WAL file on the 
server\n")); printf(_(" --wal-file-name=wal-file-name\n")); @@ -1006,6 +1115,30 @@ help_archive_get(void) printf(_(" --prefetch-dir=path location of the store area for prefetched WAL files\n")); printf(_(" --no-validate-wal skip validation of prefetched WAL file before using it\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" --log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); @@ -1047,6 +1180,7 @@ help_catchup(void) printf(_(" [--remote-proto] [--remote-host]\n")); printf(_(" [--remote-port] [--remote-path] [--remote-user]\n")); printf(_(" [--ssh-options]\n")); + printf(_(" [--dry-run]\n")); printf(_(" [--help]\n\n")); printf(_(" -b, --backup-mode=catchup-mode catchup mode=FULL|DELTA|PTRACK\n")); @@ -1071,6 +1205,30 @@ help_catchup(void) printf(_(" -w, --no-password never prompt for password\n")); printf(_(" -W, --password force password prompt\n\n")); + printf(_("\n Logging options:\n")); + printf(_(" --log-level-console=log-level-console\n")); + printf(_(" level for console logging (default: info)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-level-file=log-level-file\n")); + printf(_(" level for file logging (default: off)\n")); + printf(_(" available options: 'off', 'error', 'warning', 'info', 'log', 'verbose'\n")); + printf(_(" --log-format-file=log-format-file\n")); + printf(_(" defines the format of log files (default: plain)\n")); + printf(_(" available options: 'plain', 'json'\n")); + printf(_(" --log-filename=log-filename\n")); + printf(_(" filename for file logging (default: 'pg_probackup.log')\n")); + printf(_(" support strftime format (example: pg_probackup-%%Y-%%m-%%d_%%H%%M%%S.log)\n")); + printf(_(" --error-log-filename=error-log-filename\n")); + printf(_(" filename for error logging (default: none)\n")); + printf(_(" --log-directory=log-directory\n")); + printf(_(" directory for file logging (default: BACKUP_PATH/log)\n")); + printf(_(" 
--log-rotation-size=log-rotation-size\n")); + printf(_(" rotate logfile if its size exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'kB', 'MB', 'GB', 'TB' (default: kB)\n")); + printf(_(" --log-rotation-age=log-rotation-age\n")); + printf(_(" rotate logfile if its age exceeds this value; 0 disables; (default: 0)\n")); + printf(_(" available units: 'ms', 's', 'min', 'h', 'd' (default: min)\n")); + printf(_("\n Remote options:\n")); printf(_(" --remote-proto=protocol remote protocol to use\n")); printf(_(" available options: 'ssh', 'none' (default: ssh)\n")); @@ -1081,4 +1239,6 @@ help_catchup(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n\n")); + + printf(_(" --dry-run perform a trial run without any changes\n\n")); } diff --git a/src/init.c b/src/init.c index 8773016b5..837e2bad0 100644 --- a/src/init.c +++ b/src/init.c @@ -24,11 +24,11 @@ do_init(CatalogState *catalogState) results = pg_check_dir(catalogState->catalog_path); if (results == 4) /* exists and not empty*/ - elog(ERROR, "backup catalog already exist and it's not empty"); + elog(ERROR, "The backup catalog already exists and is not empty"); else if (results == -1) /*trouble accessing directory*/ { int errno_tmp = errno; - elog(ERROR, "cannot open backup catalog directory \"%s\": %s", + elog(ERROR, "Cannot open backup catalog directory \"%s\": %s", catalogState->catalog_path, strerror(errno_tmp)); } @@ -41,7 +41,7 @@ do_init(CatalogState *catalogState) /* create backup catalog wal directory */ dir_create_dir(catalogState->wal_subdir_path, DIR_PERMISSION, false); - elog(INFO, "Backup catalog '%s' successfully inited", catalogState->catalog_path); + elog(INFO, "Backup catalog '%s' successfully initialized", catalogState->catalog_path); return 0; } @@ -53,8 +53,9 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) /* PGDATA is always required */ if (instance->pgdata == NULL) - elog(ERROR, "Required parameter not specified: PGDATA " - "(-D, --pgdata)"); + elog(ERROR, "No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)"); /* Read system_identifier from PGDATA */ instance->system_identifier = get_system_identifier(instance->pgdata, FIO_DB_HOST, false); @@ -121,6 +122,6 @@ do_add_instance(InstanceState *instanceState, InstanceConfig *instance) /* pgdata was set through command line */ do_set_config(instanceState, true); - elog(INFO, "Instance '%s' successfully inited", instanceState->instance_name); + elog(INFO, "Instance '%s' successfully initialized", instanceState->instance_name); return 0; } diff --git a/src/merge.c b/src/merge.c index ff39c2510..e8f926795 100644 --- a/src/merge.c +++ b/src/merge.c @@ -2,7 +2,7 @@ * * merge.c: merge FULL and incremental backups * - * Copyright (c) 2018-2019, Postgres Professional + * Copyright (c) 2018-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -79,10 +79,10 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool int i; if (backup_id == INVALID_BACKUP_ID) - elog(ERROR, "required parameter is not specified: --backup-id"); + elog(ERROR, "Required parameter is not specified: --backup-id"); if (instanceState == NULL) - elog(ERROR, "required parameter is not 
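The merge.c hunks below replace the base36enc(backup->start_time) / base36enc_dup() + pg_free() pattern with a single backup_id_of(backup) call. The accessor is only declared further down in this diff (pg_probackup.h also gains a backup_id_encoded[base36bufsize] field), so its body is not visible here; the sketch below is merely a guess at the likely mechanism — encode the ID once into a buffer owned by the backup itself, so callers never have to free anything. All *_demo names are hypothetical.

/* Hypothetical sketch of a cached, caller-free backup-ID accessor.
 * Assumption: the ID is the base36 form of a 64-bit start_time-like value. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <time.h>

#define BASE36BUFSIZE 14		/* 13 base36 digits cover 2^64, plus '\0' */

typedef struct BackupDemo
{
	uint64_t	backup_id;						/* numeric ID (start_time by default) */
	char		backup_id_encoded[BASE36BUFSIZE];	/* lazily filled cache */
} BackupDemo;

static const char *
backup_id_of_demo(BackupDemo *backup)
{
	if (backup->backup_id_encoded[0] == '\0')
	{
		static const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
		char		buf[BASE36BUFSIZE];
		int			i = BASE36BUFSIZE - 1;
		uint64_t	v = backup->backup_id;

		buf[i] = '\0';
		do
		{
			buf[--i] = digits[v % 36];
			v /= 36;
		} while (v > 0);
		strcpy(backup->backup_id_encoded, buf + i);
	}
	return backup->backup_id_encoded;
}

int
main(void)
{
	BackupDemo	b = {(uint64_t) time(NULL), ""};

	/* No strdup/free dance: the string lives as long as the struct does. */
	printf("backup %s\n", backup_id_of_demo(&b));
	return 0;
}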
specified: --instance"); + elog(ERROR, "Required parameter is not specified: --instance"); elog(INFO, "Merge started"); @@ -105,7 +105,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool backup->status != BACKUP_STATUS_MERGED && backup->status != BACKUP_STATUS_DELETING) elog(ERROR, "Backup %s has status: %s", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); dest_backup = backup; break; @@ -154,12 +154,12 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool full_backup = dest_backup; dest_backup = NULL; elog(INFO, "Merge target backup %s is full backup", - base36enc(full_backup->start_time)); + backup_id_of(full_backup)); /* sanity */ if (full_backup->status == BACKUP_STATUS_DELETING) elog(ERROR, "Backup %s has status: %s", - base36enc(full_backup->start_time), + backup_id_of(full_backup), status2str(full_backup->status)); /* Case #1 */ @@ -171,7 +171,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool elog(ERROR, "Merge target is full backup and has multiple direct children, " "you must specify child backup id you want to merge with"); - elog(LOG, "Looking for closest incremental backup to merge with"); + elog(INFO, "Looking for closest incremental backup to merge with"); /* Look for closest child backup */ for (i = 0; i < parray_num(backups); i++) @@ -194,7 +194,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool if (dest_backup == NULL) elog(ERROR, "Failed to find merge candidate, " "backup %s has no valid children", - base36enc(full_backup->start_time)); + backup_id_of(full_backup)); } /* Case #2 */ @@ -223,11 +223,9 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool } if (!dest_backup) { - char *tmp_backup_id = base36enc_dup(full_backup->start_time); elog(ERROR, "Full backup %s has unfinished merge with missing backup %s", - tmp_backup_id, + backup_id_of(full_backup), base36enc(full_backup->merge_dest_backup)); - pg_free(tmp_backup_id); } } else if (full_backup->status == BACKUP_STATUS_MERGED) @@ -253,16 +251,14 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool } if (!dest_backup) { - char *tmp_backup_id = base36enc_dup(full_backup->start_time); elog(WARNING, "Full backup %s has unfinished merge with missing backup %s", - tmp_backup_id, + backup_id_of(full_backup), base36enc(full_backup->merge_dest_backup)); - pg_free(tmp_backup_id); } } else elog(ERROR, "Backup %s has status: %s", - base36enc(full_backup->start_time), + backup_id_of(full_backup), status2str(full_backup->status)); } else @@ -300,7 +296,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool if (dest_backup->status == BACKUP_STATUS_MERGING || dest_backup->status == BACKUP_STATUS_DELETING) elog(WARNING, "Rerun unfinished merge for backup %s", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); /* First we should try to find parent FULL backup */ full_backup = find_parent_full_backup(dest_backup); @@ -314,7 +310,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool */ if (dest_backup->status != BACKUP_STATUS_MERGING) elog(ERROR, "Failed to find parent full backup for %s", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); /* Find FULL backup that has unfinished merge with dest backup */ for (i = 0; i < parray_num(backups); i++) @@ -331,7 +327,7 @@ do_merge(InstanceState 
*instanceState, time_t backup_id, bool no_validate, bool if (!full_backup) elog(ERROR, "Failed to find full backup that has unfinished merge" "with backup %s, cannot rerun merge", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); if (full_backup->status == BACKUP_STATUS_MERGED) elog(WARNING, "Incremental chain is broken, try to recover unfinished merge"); @@ -341,13 +337,12 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool else { if ((full_backup->status == BACKUP_STATUS_MERGED || - full_backup->status == BACKUP_STATUS_MERGED) && + full_backup->status == BACKUP_STATUS_MERGING) && dest_backup->start_time != full_backup->merge_dest_backup) { - char *tmp_backup_id = base36enc_dup(full_backup->start_time); elog(ERROR, "Full backup %s has unfinished merge with backup %s", - tmp_backup_id, base36enc(full_backup->merge_dest_backup)); - pg_free(tmp_backup_id); + backup_id_of(full_backup), + base36enc(full_backup->merge_dest_backup)); } } @@ -362,7 +357,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool * having status MERGED */ if (dest_backup == NULL && full_backup->status != BACKUP_STATUS_MERGED) elog(ERROR, "Cannot run merge for full backup %s", - base36enc(full_backup->start_time)); + backup_id_of(full_backup)); /* sanity */ if (full_backup->status != BACKUP_STATUS_OK && @@ -371,7 +366,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool full_backup->status != BACKUP_STATUS_MERGED && full_backup->status != BACKUP_STATUS_MERGING) elog(ERROR, "Backup %s has status: %s", - base36enc(full_backup->start_time), status2str(full_backup->status)); + backup_id_of(full_backup), status2str(full_backup->status)); /* Form merge list */ dest_backup_tmp = dest_backup; @@ -389,7 +384,7 @@ do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool dest_backup_tmp->status != BACKUP_STATUS_MERGED && dest_backup_tmp->status != BACKUP_STATUS_DELETING) elog(ERROR, "Backup %s has status: %s", - base36enc(dest_backup_tmp->start_time), + backup_id_of(dest_backup_tmp), status2str(dest_backup_tmp->status)); if (dest_backup_tmp->backup_mode == BACKUP_MODE_FULL) @@ -441,7 +436,6 @@ merge_chain(InstanceState *instanceState, bool no_validate, bool no_sync) { int i; - char *dest_backup_id; char full_external_prefix[MAXPGPATH]; char full_database_dir[MAXPGPATH]; parray *full_externals = NULL, @@ -478,26 +472,20 @@ merge_chain(InstanceState *instanceState, full_backup->status == BACKUP_STATUS_MERGED) { is_retry = true; - elog(INFO, "Retry failed merge of backup %s with parent chain", base36enc(dest_backup->start_time)); + elog(INFO, "Retry failed merge of backup %s with parent chain", backup_id_of(dest_backup)); } else - elog(INFO, "Merging backup %s with parent chain", base36enc(dest_backup->start_time)); + elog(INFO, "Merging backup %s with parent chain", backup_id_of(dest_backup)); /* sanity */ if (full_backup->merge_dest_backup != INVALID_BACKUP_ID && full_backup->merge_dest_backup != dest_backup->start_time) { - char *merge_dest_backup_current = base36enc_dup(dest_backup->start_time); - char *merge_dest_backup = base36enc_dup(full_backup->merge_dest_backup); - elog(ERROR, "Cannot run merge for %s, because full backup %s has " "unfinished merge with backup %s", - merge_dest_backup_current, - base36enc(full_backup->start_time), - merge_dest_backup); - - pg_free(merge_dest_backup_current); - pg_free(merge_dest_backup); + backup_id_of(dest_backup), + backup_id_of(full_backup), + 
base36enc(full_backup->merge_dest_backup)); } /* @@ -518,7 +506,7 @@ merge_chain(InstanceState *instanceState, elog(ERROR, "Backup %s has been produced by pg_probackup version %s, " "but current program version is %s. Forward compatibility " "is not supported.", - base36enc(backup->start_time), + backup_id_of(backup), backup->program_version, PROGRAM_VERSION); } @@ -561,7 +549,7 @@ merge_chain(InstanceState *instanceState, if (!no_validate) { elog(INFO, "Validate parent chain for backup %s", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); for (i = parray_num(parent_chain) - 1; i >= 0; i--) { @@ -578,7 +566,7 @@ merge_chain(InstanceState *instanceState, if (backup->status != BACKUP_STATUS_OK) elog(ERROR, "Backup %s has status %s, merge is aborted", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); } } @@ -614,7 +602,7 @@ merge_chain(InstanceState *instanceState, /* Create directories */ create_data_directories(dest_backup->files, full_database_dir, - dest_backup->root_dir, false, false, FIO_BACKUP_HOST); + dest_backup->root_dir, false, false, FIO_BACKUP_HOST, NULL); /* External directories stuff */ if (dest_backup->external_dir_str) @@ -810,7 +798,7 @@ merge_chain(InstanceState *instanceState, join_path_components(full_file_path, full_database_dir, full_file->rel_path); pgFileDelete(full_file->mode, full_file_path); - elog(VERBOSE, "Deleted \"%s\"", full_file_path); + elog(LOG, "Deleted \"%s\"", full_file_path); } } @@ -880,25 +868,26 @@ merge_chain(InstanceState *instanceState, /* * Merging finished, now we can safely update ID of the FULL backup */ - dest_backup_id = base36enc_dup(full_backup->merge_dest_backup); elog(INFO, "Rename merged full backup %s to %s", - base36enc(full_backup->start_time), dest_backup_id); + backup_id_of(full_backup), + base36enc(full_backup->merge_dest_backup)); full_backup->status = BACKUP_STATUS_OK; full_backup->start_time = full_backup->merge_dest_backup; + /* XXX BACKUP_ID change it when backup_id wouldn't match start_time */ + full_backup->backup_id = full_backup->start_time; full_backup->merge_dest_backup = INVALID_BACKUP_ID; write_backup(full_backup, true); /* Critical section end */ /* Cleanup */ - pg_free(dest_backup_id); if (threads) { pfree(threads_args); pfree(threads); } - if (result_filelist && parray_num(result_filelist) > 0) + if (result_filelist) { parray_walk(result_filelist, pgFileFree); parray_free(result_filelist); @@ -956,9 +945,8 @@ merge_files(void *arg) if (S_ISDIR(dest_file->mode)) goto done; - if (progress) - elog(INFO, "Progress: (%d/%lu). Merging file \"%s\"", - i + 1, n_files, dest_file->rel_path); + elog(progress ? INFO : LOG, "Progress: (%d/%lu). 
Merging file \"%s\"", + i + 1, n_files, dest_file->rel_path); if (dest_file->is_datafile && !dest_file->is_cfs) tmp_file->segno = dest_file->segno; @@ -1063,7 +1051,7 @@ merge_files(void *arg) { BackupPageHeader2 *headers = NULL; - elog(VERBOSE, "The file didn`t changed since FULL backup, skip merge: \"%s\"", + elog(LOG, "The file didn`t changed since FULL backup, skip merge: \"%s\"", file->rel_path); tmp_file->crc = file->crc; @@ -1079,7 +1067,7 @@ merge_files(void *arg) tmp_file->hdr_crc = file->hdr_crc; } else - tmp_file->uncompressed_size = tmp_file->write_size; + tmp_file->uncompressed_size = file->uncompressed_size; /* Copy header metadata from old map into a new one */ tmp_file->n_headers = file->n_headers; @@ -1144,7 +1132,7 @@ remove_dir_with_files(const char *path) join_path_components(full_path, path, file->rel_path); pgFileDelete(file->mode, full_path); - elog(VERBOSE, "Deleted \"%s\"", full_path); + elog(LOG, "Deleted \"%s\"", full_path); } /* cleanup */ @@ -1193,7 +1181,7 @@ reorder_external_dirs(pgBackup *to_backup, parray *to_external, char new_path[MAXPGPATH]; makeExternalDirPathByNum(old_path, externaldir_template, i + 1); makeExternalDirPathByNum(new_path, externaldir_template, from_num); - elog(VERBOSE, "Rename %s to %s", old_path, new_path); + elog(LOG, "Rename %s to %s", old_path, new_path); if (rename (old_path, new_path) == -1) elog(ERROR, "Could not rename directory \"%s\" to \"%s\": %s", old_path, new_path, strerror(errno)); @@ -1346,8 +1334,8 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, */ if (!from_file) { - elog(ERROR, "Failed to locate nonedata file \"%s\" in backup %s", - dest_file->rel_path, base36enc(from_backup->start_time)); + elog(ERROR, "Failed to locate non-data file \"%s\" in backup %s", + dest_file->rel_path, backup_id_of(from_backup)); continue; } @@ -1357,11 +1345,11 @@ merge_non_data_file(parray *parent_chain, pgBackup *full_backup, /* sanity */ if (!from_backup) - elog(ERROR, "Failed to found a backup containing full copy of nonedata file \"%s\"", + elog(ERROR, "Failed to found a backup containing full copy of non-data file \"%s\"", dest_file->rel_path); if (!from_file) - elog(ERROR, "Failed to locate a full copy of nonedata file \"%s\"", dest_file->rel_path); + elog(ERROR, "Failed to locate a full copy of non-data file \"%s\"", dest_file->rel_path); /* set path to source file */ if (from_file->external_dir_num) @@ -1443,11 +1431,11 @@ is_forward_compatible(parray *parent_chain) elog(WARNING, "In-place merge is disabled because of storage format incompatibility. " "Backup %s storage format version: %s, " "current storage format version: %s", - base36enc(oldest_ver_backup->start_time), + backup_id_of(oldest_ver_backup), oldest_ver_backup->program_version, STORAGE_FORMAT_VERSION); return false; } return true; -} \ No newline at end of file +} diff --git a/src/parsexlog.c b/src/parsexlog.c index 7f1ca9c75..7df169fbf 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -29,7 +29,10 @@ * RmgrNames is an array of resource manager names, to make error messages * a bit nicer. 
*/ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 +#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \ + name, +#elif PG_VERSION_NUM >= 100000 #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask) \ name, #else @@ -389,7 +392,7 @@ validate_backup_wal_from_start_to_stop(pgBackup *backup, elog(WARNING, "There are not enough WAL records to consistenly restore " "backup %s from START LSN: %X/%X to STOP LSN: %X/%X", - base36enc(backup->start_time), + backup_id_of(backup), (uint32) (backup->start_lsn >> 32), (uint32) (backup->start_lsn), (uint32) (backup->stop_lsn >> 32), @@ -407,24 +410,20 @@ validate_wal(pgBackup *backup, const char *archivedir, time_t target_time, TransactionId target_xid, XLogRecPtr target_lsn, TimeLineID tli, uint32 wal_seg_size) { - const char *backup_id; XLogRecTarget last_rec; char last_timestamp[100], target_timestamp[100]; bool all_wal = false; - /* We need free() this later */ - backup_id = base36enc(backup->start_time); - if (!XRecOffIsValid(backup->start_lsn)) elog(ERROR, "Invalid start_lsn value %X/%X of backup %s", (uint32) (backup->start_lsn >> 32), (uint32) (backup->start_lsn), - backup_id); + backup_id_of(backup)); if (!XRecOffIsValid(backup->stop_lsn)) elog(ERROR, "Invalid stop_lsn value %X/%X of backup %s", (uint32) (backup->stop_lsn >> 32), (uint32) (backup->stop_lsn), - backup_id); + backup_id_of(backup)); /* * Check that the backup has all wal files needed @@ -447,7 +446,7 @@ validate_wal(pgBackup *backup, const char *archivedir, if (backup->status == BACKUP_STATUS_CORRUPT) { - elog(WARNING, "Backup %s WAL segments are corrupted", backup_id); + elog(WARNING, "Backup %s WAL segments are corrupted", backup_id_of(backup)); return; } /* @@ -458,7 +457,7 @@ validate_wal(pgBackup *backup, const char *archivedir, !XRecOffIsValid(target_lsn)) { /* Recovery target is not given so exit */ - elog(INFO, "Backup %s WAL segments are valid", backup_id); + elog(INFO, "Backup %s WAL segments are valid", backup_id_of(backup)); return; } @@ -1440,7 +1439,18 @@ XLogThreadWorker(void *arg) * Usually SimpleXLogPageRead() does it by itself. But here we need * to do it manually to support threads. */ +#if PG_VERSION_NUM >= 150000 + if (reader_data->need_switch && ( + errormsg == NULL || + /* + * Pg15 now informs if "contrecord" is missing. + * TODO: probably we should abort reading logs at this moment. + * But we continue as we did with bug present in Pg < 15. 
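The PG_VERSION_NUM >= 150000 branch above adds a ninth decode parameter to the PG_RMGR X-macro because PostgreSQL 15's rmgrlist.h grew a decode-callback column; every branch keeps only the name field when building RmgrNames. A toy, self-contained version of that X-macro expansion follows — the list entries and identifiers are invented for illustration, not taken from rmgrlist.h.

/* Toy X-macro: one shared list, a row-shaping macro that keeps only the
 * name column, and a names array built from it (like RmgrNames). */
#include <stdio.h>

/* Imagine this is the shared list header (rmgrlist.h in PostgreSQL). */
#define TOY_RMGR_LIST \
	PG_RMGR(RM_XLOG_ID, "XLOG", redo_xlog, desc_xlog, decode_xlog) \
	PG_RMGR(RM_XACT_ID, "Transaction", redo_xact, desc_xact, decode_xact) \
	PG_RMGR(RM_HEAP_ID, "Heap", redo_heap, desc_heap, decode_heap)

/* Keep only the human-readable name, ignore the callback columns. */
#define PG_RMGR(symname, name, redo, desc, decode) name,

static const char *ToyRmgrNames[] = {
	TOY_RMGR_LIST
};

#undef PG_RMGR

int
main(void)
{
	for (unsigned i = 0; i < sizeof(ToyRmgrNames) / sizeof(ToyRmgrNames[0]); i++)
		printf("rmgr %u: %s\n", i, ToyRmgrNames[i]);
	return 0;
}

When the list grows an extra column (as it did in PostgreSQL 15), only the PG_RMGR definition's parameter list has to change, which is exactly what the conditional block above does.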
+ */ + !XLogRecPtrIsInvalid(xlogreader->abortedRecPtr))) +#else if (reader_data->need_switch && errormsg == NULL) +#endif { if (SwitchThreadToNextWal(xlogreader, thread_arg)) continue; @@ -1578,9 +1588,14 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) reader_data = (XLogReaderData *) xlogreader->private_data; reader_data->need_switch = false; +start: /* Critical section */ pthread_lock(&wal_segment_mutex); Assert(segno_next); + + if (reader_data->xlogsegno > segno_next) + segno_next = reader_data->xlogsegno; + reader_data->xlogsegno = segno_next; segnum_read++; segno_next++; @@ -1594,6 +1609,7 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) GetXLogRecPtr(reader_data->xlogsegno, 0, wal_seg_size, arg->startpoint); /* We need to close previously opened file if it wasn't closed earlier */ CleanupXLogPageRead(xlogreader); + xlogreader->currRecPtr = InvalidXLogRecPtr; /* Skip over the page header and contrecord if any */ found = XLogFindNextRecord(xlogreader, arg->startpoint); @@ -1603,6 +1619,8 @@ SwitchThreadToNextWal(XLogReaderState *xlogreader, xlog_thread_arg *arg) */ if (XLogRecPtrIsInvalid(found)) { + if (reader_data->need_switch) + goto start; /* * Check if we need to stop reading. We stop if other thread found a * target segment. @@ -1769,7 +1787,12 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, /* Is this a special record type that I recognize? */ - if (rmid == RM_DBASE_ID && rminfo == XLOG_DBASE_CREATE) + if (rmid == RM_DBASE_ID +#if PG_VERSION_NUM >= 150000 + && (rminfo == XLOG_DBASE_CREATE_WAL_LOG || rminfo == XLOG_DBASE_CREATE_FILE_COPY)) +#else + && rminfo == XLOG_DBASE_CREATE) +#endif { /* * New databases can be safely ignored. They would be completely @@ -1823,13 +1846,21 @@ extractPageInfo(XLogReaderState *record, XLogReaderData *reader_data, RmgrNames[rmid], info); } +#if PG_VERSION_NUM >= 150000 + for (block_id = 0; block_id <= record->record->max_block_id; block_id++) +#else for (block_id = 0; block_id <= record->max_block_id; block_id++) +#endif { RelFileNode rnode; ForkNumber forknum; BlockNumber blkno; +#if PG_VERSION_NUM >= 150000 + if (!XLogRecGetBlockTagExtended(record, block_id, &rnode, &forknum, &blkno, NULL)) +#else if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno)) +#endif continue; /* We only care about the main fork; others are copied as is */ @@ -1946,4 +1977,4 @@ static XLogReaderState* WalReaderAllocate(uint32 wal_seg_size, XLogReaderData *r #else return XLogReaderAllocate(&SimpleXLogPageRead, reader_data); #endif -} \ No newline at end of file +} diff --git a/src/pg_probackup.c b/src/pg_probackup.c index c5ed13175..fa67ddff5 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -78,6 +78,7 @@ pid_t my_pid = 0; __thread int my_thread_num = 1; bool progress = false; bool no_sync = false; +time_t start_time = INVALID_BACKUP_ID; #if PG_VERSION_NUM >= 100000 char *replication_slot = NULL; bool temp_slot = false; @@ -87,7 +88,7 @@ bool perm_slot = false; /* backup options */ bool backup_logs = false; bool smooth_checkpoint; -char *remote_agent; +bool remote_agent = false; static char *backup_note = NULL; /* catchup options */ static char *catchup_source_pgdata = NULL; @@ -97,7 +98,7 @@ static char *target_time = NULL; static char *target_xid = NULL; static char *target_lsn = NULL; static char *target_inclusive = NULL; -static TimeLineID target_tli; +static char *target_tli_string; /* timeline number, "current" or "latest"*/ static char *target_stop; static 
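The SwitchThreadToNextWal() changes above add a start: retry label and make the shared segno_next counter strictly non-decreasing (if (reader_data->xlogsegno > segno_next) segno_next = reader_data->xlogsegno;), all under wal_segment_mutex. The standalone pthreads sketch below shows only that "claim the next segment, but never hand out one we have already passed" dispatch idea with hypothetical names; it does not model the surrounding XLogReader handling.

/* Sketch: worker threads claim WAL segment numbers from a shared,
 * monotonically increasing counter protected by a mutex. */
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t seg_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint64_t segno_next = 1;		/* next segment to hand out */

typedef struct
{
	int			worker_id;
	uint64_t	xlogsegno;			/* last segment this worker processed */
} Worker;

static uint64_t
claim_next_segment(Worker *w)
{
	uint64_t	segno;

	pthread_mutex_lock(&seg_mutex);
	/* Never go backwards: if this worker already read past the shared
	 * counter (e.g. it followed a cross-segment record), fast-forward it. */
	if (w->xlogsegno > segno_next)
		segno_next = w->xlogsegno;
	segno = segno_next++;
	pthread_mutex_unlock(&seg_mutex);

	w->xlogsegno = segno;
	return segno;
}

static void *
worker_main(void *arg)
{
	Worker	   *w = arg;

	for (int i = 0; i < 3; i++)
		printf("worker %d claims segment %llu\n",
			   w->worker_id, (unsigned long long) claim_next_segment(w));
	return NULL;
}

int
main(void)
{
	pthread_t	tid[2];
	Worker		workers[2] = {{1, 0}, {2, 0}};

	for (int i = 0; i < 2; i++)
		pthread_create(&tid[i], NULL, worker_main, &workers[i]);
	for (int i = 0; i < 2; i++)
		pthread_join(tid[i], NULL);
	return 0;
}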
bool target_immediate; static char *target_name = NULL; @@ -122,6 +123,8 @@ static parray *datname_include_list = NULL; /* arrays for --exclude-path's */ static parray *exclude_absolute_paths_list = NULL; static parray *exclude_relative_paths_list = NULL; +static char* gl_waldir_path = NULL; +static bool allow_partial_incremental = false; /* checkdb options */ bool need_amcheck = false; @@ -161,6 +164,7 @@ bool no_validate_wal = false; /* show options */ ShowFormat show_format = SHOW_PLAIN; bool show_archive = false; +static bool show_base_units = false; /* set-backup options */ int64 ttl = -1; @@ -214,6 +218,7 @@ static ConfigOption cmd_options[] = { 'b', 184, "merge-expired", &merge_expired, SOURCE_CMD_STRICT }, { 'b', 185, "dry-run", &dry_run, SOURCE_CMD_STRICT }, { 's', 238, "note", &backup_note, SOURCE_CMD_STRICT }, + { 'U', 241, "start-time", &start_time, SOURCE_CMD_STRICT }, /* catchup options */ { 's', 239, "source-pgdata", &catchup_source_pgdata, SOURCE_CMD_STRICT }, { 's', 240, "destination-pgdata", &catchup_destination_pgdata, SOURCE_CMD_STRICT }, @@ -223,7 +228,7 @@ static ConfigOption cmd_options[] = { 's', 137, "recovery-target-xid", &target_xid, SOURCE_CMD_STRICT }, { 's', 144, "recovery-target-lsn", &target_lsn, SOURCE_CMD_STRICT }, { 's', 138, "recovery-target-inclusive", &target_inclusive, SOURCE_CMD_STRICT }, - { 'u', 139, "recovery-target-timeline", &target_tli, SOURCE_CMD_STRICT }, + { 's', 139, "recovery-target-timeline", &target_tli_string, SOURCE_CMD_STRICT }, { 's', 157, "recovery-target", &target_stop, SOURCE_CMD_STRICT }, { 'f', 'T', "tablespace-mapping", opt_tablespace_map, SOURCE_CMD_STRICT }, { 'f', 155, "external-mapping", opt_externaldir_map, SOURCE_CMD_STRICT }, @@ -238,6 +243,8 @@ static ConfigOption cmd_options[] = { 's', 160, "primary-conninfo", &primary_conninfo, SOURCE_CMD_STRICT }, { 's', 'S', "primary-slot-name",&replication_slot, SOURCE_CMD_STRICT }, { 'f', 'I', "incremental-mode", opt_incr_restore_mode, SOURCE_CMD_STRICT }, + { 's', 'X', "waldir", &gl_waldir_path, SOURCE_CMD_STRICT }, + { 'b', 242, "destroy-all-other-dbs", &allow_partial_incremental, SOURCE_CMD_STRICT }, /* checkdb options */ { 'b', 195, "amcheck", &need_amcheck, SOURCE_CMD_STRICT }, { 'b', 196, "heapallindexed", &heapallindexed, SOURCE_CMD_STRICT }, @@ -269,6 +276,8 @@ static ConfigOption cmd_options[] = /* show options */ { 'f', 165, "format", opt_show_format, SOURCE_CMD_STRICT }, { 'b', 166, "archive", &show_archive, SOURCE_CMD_STRICT }, + /* show-config options */ + { 'b', 167, "no-scale-units", &show_base_units,SOURCE_CMD_STRICT }, /* set-backup options */ { 'I', 170, "ttl", &ttl, SOURCE_CMD_STRICT, SOURCE_DEFAULT, 0, OPTION_UNIT_S, option_get_value}, { 's', 171, "expire-time", &expire_time_string, SOURCE_CMD_STRICT }, @@ -279,7 +288,7 @@ static ConfigOption cmd_options[] = { 's', 136, "time", &target_time, SOURCE_CMD_STRICT }, { 's', 137, "xid", &target_xid, SOURCE_CMD_STRICT }, { 's', 138, "inclusive", &target_inclusive, SOURCE_CMD_STRICT }, - { 'u', 139, "timeline", &target_tli, SOURCE_CMD_STRICT }, + { 's', 139, "timeline", &target_tli_string, SOURCE_CMD_STRICT }, { 's', 144, "lsn", &target_lsn, SOURCE_CMD_STRICT }, { 'b', 140, "immediate", &target_immediate, SOURCE_CMD_STRICT }, @@ -308,8 +317,13 @@ main(int argc, char *argv[]) init_config(&instance_config, instance_name); PROGRAM_NAME = get_progname(argv[0]); + set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_probackup")); PROGRAM_FULL_PATH = palloc0(MAXPGPATH); + // Setting C locale for numeric values in order to 
impose dot-based floating-point representation + memorize_environment_locale(); + setlocale(LC_NUMERIC, "C"); + /* Get current time */ current_time = time(NULL); @@ -352,6 +366,7 @@ main(int argc, char *argv[]) elog(ERROR, "Version mismatch, pg_probackup binary with version '%s' " "is launched as an agent for pg_probackup binary with version '%s'", PROGRAM_VERSION, argv[2]); + remote_agent = true; fio_communicate(STDIN_FILENO, STDOUT_FILENO); return 0; case HELP_CMD: @@ -422,6 +437,18 @@ main(int argc, char *argv[]) /* Parse command line only arguments */ config_get_opt(argc, argv, cmd_options, instance_options); + if (backup_subcmd == SET_CONFIG_CMD) + { + int i; + for (i = 0; i < argc; i++) + { + if (strncmp("--log-format-console", argv[i], strlen("--log-format-console")) == 0) + { + elog(ERROR, "Option 'log-format-console' set only from terminal\n"); + } + } + } + pgut_init(); if (no_color) @@ -469,7 +496,10 @@ main(int argc, char *argv[]) backup_subcmd != HELP_CMD && backup_subcmd != VERSION_CMD && backup_subcmd != CATCHUP_CMD) - elog(ERROR, "required parameter not specified: BACKUP_PATH (-B, --backup-path)"); + elog(ERROR, + "No backup catalog path specified.\n" + "Please specify it either using environment variable BACKUP_PATH or\n" + "command line option --backup-path (-B)"); /* ===== catalogState (END) ======*/ @@ -483,7 +513,7 @@ main(int argc, char *argv[]) { if (backup_subcmd != INIT_CMD && backup_subcmd != SHOW_CMD && backup_subcmd != VALIDATE_CMD && backup_subcmd != CHECKDB_CMD && backup_subcmd != CATCHUP_CMD) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); } else { @@ -596,7 +626,7 @@ main(int argc, char *argv[]) backup_path != NULL && instance_name == NULL && instance_config.pgdata == NULL) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); /* Check checkdb command options consistency */ if (backup_subcmd == CHECKDB_CMD && @@ -655,6 +685,7 @@ main(int argc, char *argv[]) if (instance_config.pgdata != NULL) canonicalize_path(instance_config.pgdata); if (instance_config.pgdata != NULL && + (backup_subcmd != ARCHIVE_GET_CMD && backup_subcmd != CATCHUP_CMD) && !is_absolute_path(instance_config.pgdata)) elog(ERROR, "-D, --pgdata must be an absolute path"); @@ -711,7 +742,7 @@ main(int argc, char *argv[]) */ recovery_target_options = parseRecoveryTargetOptions(target_time, target_xid, - target_inclusive, target_tli, target_lsn, + target_inclusive, target_tli_string, target_lsn, (target_stop != NULL) ? target_stop : (target_immediate) ? 
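The locale lines at the start of this chunk pin LC_NUMERIC to "C" so floating-point values are always rendered with a dot, while memorize_environment_locale() / free_environment_locale() (declared in the pg_probackup.h hunk, implemented in show.c) evidently save and later restore the environment's setting. Their bodies are not in this diff; the snippet below is only a guessed illustration of that save/switch/restore pattern, with made-up function names.

/* Sketch: remember the environment's LC_NUMERIC, switch to "C" for
 * dot-based float formatting, and restore the original at exit. */
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *saved_lc_numeric = NULL;

static void
memorize_numeric_locale(void)
{
	const char *cur = setlocale(LC_NUMERIC, NULL);	/* query only */

	saved_lc_numeric = strdup(cur ? cur : "C");
}

static void
restore_numeric_locale(void)
{
	if (saved_lc_numeric)
	{
		setlocale(LC_NUMERIC, saved_lc_numeric);
		free(saved_lc_numeric);
		saved_lc_numeric = NULL;
	}
}

int
main(void)
{
	setlocale(LC_ALL, "");			/* adopt the user's environment locale */
	memorize_numeric_locale();
	setlocale(LC_NUMERIC, "C");		/* always print 3.14, never 3,14 */

	printf("%.2f\n", 3.14);

	restore_numeric_locale();
	return 0;
}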
"immediate" : NULL, target_name, target_action); @@ -738,6 +769,7 @@ main(int argc, char *argv[]) restore_params->partial_restore_type = NONE; restore_params->primary_conninfo = primary_conninfo; restore_params->incremental_mode = incremental_mode; + restore_params->allow_partial_incremental = allow_partial_incremental; /* handle partial restore parameters */ if (datname_exclude_list && datname_include_list) @@ -753,6 +785,21 @@ main(int argc, char *argv[]) restore_params->partial_restore_type = INCLUDE; restore_params->partial_db_list = datname_include_list; } + + if (gl_waldir_path) + { + /* clean up xlog directory name, check it's absolute */ + canonicalize_path(gl_waldir_path); + if (!is_absolute_path(gl_waldir_path)) + { + elog(ERROR, "WAL directory location must be an absolute path"); + } + if (strlen(gl_waldir_path) > MAXPGPATH) + elog(ERROR, "Value specified to --waldir is too long"); + + } + restore_params->waldir = gl_waldir_path; + } /* @@ -793,14 +840,16 @@ main(int argc, char *argv[]) if (catchup_destination_pgdata == NULL) elog(ERROR, "You must specify \"--destination-pgdata\" option with the \"%s\" command", get_subcmd_name(backup_subcmd)); if (current.backup_mode == BACKUP_MODE_INVALID) - elog(ERROR, "Required parameter not specified: BACKUP_MODE (-b, --backup-mode)"); + elog(ERROR, "No backup mode specified.\n" + "Please specify it either using environment variable BACKUP_MODE or\n" + "command line option --backup-mode (-b)"); if (current.backup_mode != BACKUP_MODE_FULL && current.backup_mode != BACKUP_MODE_DIFF_PTRACK && current.backup_mode != BACKUP_MODE_DIFF_DELTA) elog(ERROR, "Only \"FULL\", \"PTRACK\" and \"DELTA\" modes are supported with the \"%s\" command", get_subcmd_name(backup_subcmd)); if (!stream_wal) elog(INFO, "--stream is required, forcing stream mode"); current.stream = stream_wal = true; if (instance_config.external_dir_str) - elog(ERROR, "external directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd)); + elog(ERROR, "External directories not supported fom \"%s\" command", get_subcmd_name(backup_subcmd)); // TODO check instance_config.conn_opt } @@ -939,14 +988,20 @@ main(int argc, char *argv[]) case BACKUP_CMD: { current.stream = stream_wal; + if (start_time != INVALID_BACKUP_ID) + elog(WARNING, "Please do not use the --start-time option to start backup. " + "This is a service option required to work with other extensions. 
" + "We do not guarantee future support for this flag."); + /* sanity */ if (current.backup_mode == BACKUP_MODE_INVALID) - elog(ERROR, "required parameter not specified: BACKUP_MODE " - "(-b, --backup-mode)"); + elog(ERROR, "No backup mode specified.\n" + "Please specify it either using environment variable BACKUP_MODE or\n" + "command line option --backup-mode (-b)"); return do_backup(instanceState, set_backup_params, - no_validate, no_sync, backup_logs); + no_validate, no_sync, backup_logs, start_time); } case CATCHUP_CMD: return do_catchup(catchup_source_pgdata, catchup_destination_pgdata, num_threads, !no_sync, @@ -997,7 +1052,7 @@ main(int argc, char *argv[]) do_merge(instanceState, current.backup_id, no_validate, no_sync); break; case SHOW_CONFIG_CMD: - do_show_config(); + do_show_config(show_base_units); break; case SET_CONFIG_CMD: do_set_config(instanceState, false); @@ -1023,6 +1078,8 @@ main(int argc, char *argv[]) break; } + free_environment_locale(); + return 0; } @@ -1141,8 +1198,8 @@ opt_datname_exclude_list(ConfigOption *opt, const char *arg) void opt_datname_include_list(ConfigOption *opt, const char *arg) { - if (strcmp(arg, "tempate0") == 0 || - strcmp(arg, "tempate1") == 0) + if (strcmp(arg, "template0") == 0 || + strcmp(arg, "template1") == 0) elog(ERROR, "Databases 'template0' and 'template1' cannot be used for partial restore or validation"); opt_parser_add_to_parray_helper(&datname_include_list, arg); diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 4cd65980c..ae99e0605 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -50,6 +50,12 @@ #include #endif +#if PG_VERSION_NUM >= 150000 +// _() is explicitly undefined in libpq-int.h +// https://github.com/postgres/postgres/commit/28ec316787674dd74d00b296724a009b6edc2fb0 +#define _(s) gettext(s) +#endif + /* Wrap the code that we're going to delete after refactoring in this define*/ #define REFACTORE_ME @@ -85,6 +91,7 @@ extern const char *PROGRAM_EMAIL; #define DATABASE_MAP "database_map" #define HEADER_MAP "page_header_map" #define HEADER_MAP_TMP "page_header_map_tmp" +#define XLOG_CONTROL_BAK_FILE XLOG_CONTROL_FILE".pbk.bak" /* default replication slot names */ #define DEFAULT_TEMP_SLOT_NAME "pg_probackup_slot"; @@ -104,6 +111,8 @@ extern const char *PROGRAM_EMAIL; /* 64-bit xid support for PGPRO_EE */ #ifndef PGPRO_EE #define XID_FMT "%u" +#elif !defined(XID_FMT) +#define XID_FMT UINT64_FORMAT #endif #ifndef STDIN_FILENO @@ -129,6 +138,9 @@ extern const char *PROGRAM_EMAIL; #define XRecOffIsNull(xlrp) \ ((xlrp) % XLOG_BLCKSZ == 0) +/* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */ +#define base36bufsize 14 + /* Text Coloring macro */ #define TC_LEN 11 #define TC_RED "\033[0;31m" @@ -145,7 +157,6 @@ extern const char *PROGRAM_EMAIL; #define TC_CYAN_BOLD "\033[1;36m" #define TC_RESET "\033[0m" - typedef struct RedoParams { TimeLineID tli; @@ -171,6 +182,7 @@ typedef enum DestDirIncrCompatibility POSTMASTER_IS_RUNNING, SYSTEM_ID_MISMATCH, BACKUP_LABEL_EXISTS, + PARTIAL_INCREMENTAL_FORBIDDEN, DEST_IS_NOT_OK, DEST_OK } DestDirIncrCompatibility; @@ -209,11 +221,14 @@ typedef enum CompressAlg typedef enum ForkName { + none, vm, fsm, cfm, init, - ptrack + ptrack, + cfs_bck, + cfm_bck } ForkName; #define INIT_FILE_CRC32(use_crc32c, crc) \ @@ -269,6 +284,7 @@ typedef struct pgFile int segno; /* Segment number for ptrack */ int n_blocks; /* number of blocks in the data file in data directory */ bool is_cfs; /* Flag to distinguish files compressed by CFS*/ + struct pgFile *cfs_chain; /* linked list of CFS 
segment's cfm, bck, cfm_bck related files */ int external_dir_num; /* Number of external directory. 0 if not external */ bool exists_in_prev; /* Mark files, both data and regular, that exists in previous backup */ CompressAlg compress_alg; /* compression algorithm applied to the file */ @@ -283,6 +299,8 @@ typedef struct pgFile pg_off_t hdr_off; /* offset in header map */ int hdr_size; /* length of headers */ bool excluded; /* excluded via --exclude-path option */ + bool skip_cfs_nested; /* mark to skip in processing treads as nested to cfs_chain */ + bool remove_from_list; /* tmp flag to clean up files list from temp and unlogged tables */ } pgFile; typedef struct page_map_entry @@ -338,11 +356,11 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn`t changed since previous backup, DELTA backup do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.5" +#define PROGRAM_VERSION "2.5.15" /* update when remote agent API or behaviour changes */ -#define AGENT_PROTOCOL_VERSION 20501 -#define AGENT_PROTOCOL_VERSION_STR "2.5.1" +#define AGENT_PROTOCOL_VERSION 20509 +#define AGENT_PROTOCOL_VERSION_STR "2.5.9" /* update only when changing storage format */ #define STORAGE_FORMAT_VERSION "2.4.4" @@ -450,7 +468,10 @@ struct pgBackup { BackupMode backup_mode; /* Mode - one of BACKUP_MODE_xxx above*/ time_t backup_id; /* Identifier of the backup. - * Currently it's the same as start_time */ + * By default it's the same as start_time + * but can be increased if same backup_id + * already exists. It can be also set by + * start_time parameter */ BackupStatus status; /* Status - one of BACKUP_STATUS_xxx above*/ TimeLineID tli; /* timeline of start and stop backup lsns */ XLogRecPtr start_lsn; /* backup's starting transaction log location */ @@ -522,6 +543,8 @@ struct pgBackup /* map used for access to page headers */ HeaderMap hdr_map; + + char backup_id_encoded[base36bufsize]; }; /* Recovery target for restore and validate subcommands */ @@ -542,6 +565,7 @@ typedef struct pgRecoveryTarget const char *target_stop; const char *target_name; const char *target_action; + const char *target_tli_string; /* timeline number, "current" or "latest" from recovery_target_timeline option*/ } pgRecoveryTarget; /* Options needed for restore and validate commands */ @@ -566,6 +590,9 @@ typedef struct pgRestoreParams /* options for partial restore */ PartialRestoreType partial_restore_type; parray *partial_db_list; + bool allow_partial_incremental; + + char* waldir; } pgRestoreParams; /* Options needed for set-backup command */ @@ -761,6 +788,11 @@ typedef struct StopBackupCallbackParams strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ strcmp((fname) + XLOG_FNAME_LEN, ".part") == 0) +#define IsTempPartialXLogFileName(fname) \ + (strlen(fname) == XLOG_FNAME_LEN + strlen(".partial.part") && \ + strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ + strcmp((fname) + XLOG_FNAME_LEN, ".partial.part") == 0) + #define IsTempCompressXLogFileName(fname) \ (strlen(fname) == XLOG_FNAME_LEN + strlen(".gz.part") && \ strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ @@ -787,7 +819,7 @@ extern bool perm_slot; extern bool smooth_checkpoint; /* remote probackup options */ -extern char* remote_agent; +extern bool remote_agent; extern bool exclusive_backup; @@ -840,7 +872,7 @@ extern char** commands_args; /* in backup.c */ extern int do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, - bool 
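IsTempPartialXLogFileName(), added in the pg_probackup.h hunk above, recognizes in-progress partial WAL segments such as 000000010000000000000001.partial.part: 24 hex characters followed by a ".partial.part" suffix. Below is a tiny standalone check of the same rule, assuming the standard 24-character WAL segment file name length.

/* Sketch: does a file name look like a temporary partial WAL segment,
 * i.e. 24 hex characters followed by ".partial.part"? */
#include <stdio.h>
#include <string.h>

#define XLOG_FNAME_LEN 24	/* standard WAL segment file name length */

static int
is_temp_partial_xlog_name(const char *fname)
{
	return strlen(fname) == XLOG_FNAME_LEN + strlen(".partial.part") &&
		   strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN &&
		   strcmp(fname + XLOG_FNAME_LEN, ".partial.part") == 0;
}

int
main(void)
{
	const char *names[] = {
		"000000010000000000000001.partial.part",	/* matches */
		"000000010000000000000001.partial",			/* archived partial, no match */
		"000000010000000000000001.gz.part",			/* compressed temp, no match */
	};

	for (int i = 0; i < 3; i++)
		printf("%-42s -> %d\n", names[i], is_temp_partial_xlog_name(names[i]));
	return 0;
}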
no_validate, bool no_sync, bool backup_logs); + bool no_validate, bool no_sync, bool backup_logs, time_t start_time); extern void do_checkdb(bool need_amcheck, ConnectionOptions conn_opt, char *pgdata); extern BackupMode parse_backup_mode(const char *value); @@ -863,18 +895,28 @@ extern bool satisfy_recovery_target(const pgBackup *backup, const pgRecoveryTarget *rt); extern pgRecoveryTarget *parseRecoveryTargetOptions( const char *target_time, const char *target_xid, - const char *target_inclusive, TimeLineID target_tli, const char* target_lsn, + const char *target_inclusive, const char *target_tli_string, const char* target_lsn, const char *target_stop, const char *target_name, const char *target_action); extern parray *get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, PartialRestoreType partial_restore_type); +extern const char* backup_id_of(pgBackup *backup); +extern void reset_backup_id(pgBackup *backup); + extern parray *get_backup_filelist(pgBackup *backup, bool strict); extern parray *read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict); extern bool tliIsPartOfHistory(const parray *timelines, TimeLineID tli); extern DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, - IncrRestoreMode incremental_mode); + IncrRestoreMode incremental_mode, + parray *partial_db_list, + bool allow_partial_incremental); + +/* in remote.c */ +extern void check_remote_agent_compatibility(int agent_version, + char *compatibility_str, size_t compatibility_str_max_size); +extern size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_buf_size); /* in merge.c */ extern void do_merge(InstanceState *instanceState, time_t backup_id, bool no_validate, bool no_sync); @@ -897,7 +939,7 @@ extern void do_archive_get(InstanceState *instanceState, InstanceConfig *instanc char *wal_file_name, int batch_size, bool validate_wal); /* in configure.c */ -extern void do_show_config(void); +extern void do_show_config(bool show_base_units); extern void do_set_config(InstanceState *instanceState, bool missing_ok); extern void init_config(InstanceConfig *config, const char *instance_name); extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); @@ -905,6 +947,8 @@ extern InstanceConfig *readInstanceConfigFile(InstanceState *instanceState); /* in show.c */ extern int do_show(CatalogState *catalogState, InstanceState *instanceState, time_t requested_backup_id, bool show_archive); +extern void memorize_environment_locale(void); +extern void free_environment_locale(void); /* in delete.c */ extern void do_delete(InstanceState *instanceState, time_t backup_id); @@ -981,7 +1025,7 @@ extern void write_backup_filelist(pgBackup *backup, parray *files, const char *root, parray *external_list, bool sync); -extern void pgBackupCreateDir(pgBackup *backup, const char *backup_instance_path); +extern void pgBackupInitDir(pgBackup *backup, const char *backup_instance_path); extern void pgNodeInit(PGNodeInfo *node); extern void pgBackupInit(pgBackup *backup); extern void pgBackupFree(void *backup); @@ -1010,8 +1054,9 @@ extern CompressAlg parse_compress_alg(const char *arg); extern const char* deparse_compress_alg(int alg); /* in dir.c */ -extern bool get_control_value(const char *str, const char *name, - char *value_str, int64 *value_int64, bool is_mandatory); +extern bool get_control_value_int64(const char *str, const char *name, int64 *value_int64, bool is_mandatory); +extern bool get_control_value_str(const 
char *str, const char *name, + char *value_str, size_t value_str_size, bool is_mandatory); extern void dir_list_file(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num, fio_location location); @@ -1022,7 +1067,8 @@ extern void create_data_directories(parray *dest_files, const char *backup_dir, bool extract_tablespaces, bool incremental, - fio_location location); + fio_location location, + const char *waldir_path); extern void read_tablespace_map(parray *links, const char *backup_dir); extern void opt_tablespace_map(ConfigOption *opt, const char *arg); @@ -1061,6 +1107,7 @@ extern void fio_pgFileDelete(pgFile *file, const char *full_path); extern void pgFileFree(void *file); extern pg_crc32 pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok); +extern pg_crc32 pgFileGetCRCTruncated(const char *file_path, bool use_crc32c, bool missing_ok); extern pg_crc32 pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok); extern int pgFileMapComparePath(const void *f1, const void *f2); @@ -1076,6 +1123,7 @@ extern int pgCompareString(const void *str1, const void *str2); extern int pgPrefixCompareString(const void *str1, const void *str2); extern int pgCompareOid(const void *f1, const void *f2); extern void pfilearray_clear_locks(parray *file_list); +extern bool set_forkname(pgFile *file); /* in data.c */ extern bool check_data_file(ConnectionArgs *arguments, pgFile *file, @@ -1158,11 +1206,12 @@ extern uint64 get_system_identifier(const char *pgdata_path, fio_location locati extern uint64 get_remote_system_identifier(PGconn *conn); extern uint32 get_data_checksum_version(bool safe); extern pg_crc32c get_pgcontrol_checksum(const char *pgdata_path); -extern DBState get_system_dbstate(const char *pgdata_path, fio_location location); extern uint32 get_xlog_seg_size(const char *pgdata_path); extern void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo); extern void set_min_recovery_point(pgFile *file, const char *backup_path, XLogRecPtr stop_backup_lsn); +extern void get_control_file_or_back_file(const char *pgdata_path, fio_location location, + ControlFileData *control); extern void copy_pgcontrol_file(const char *from_fullpath, fio_location from_location, const char *to_fullpath, fio_location to_location, pgFile *file); @@ -1170,8 +1219,9 @@ extern void time2iso(char *buf, size_t len, time_t time, bool utc); extern const char *status2str(BackupStatus status); const char *status2str_color(BackupStatus status); extern BackupStatus str2status(const char *status); -extern const char *base36enc(long unsigned int value); -extern char *base36enc_dup(long unsigned int value); +extern const char *base36enc_to(long unsigned int value, char buf[ARG_SIZE_HINT base36bufsize]); +/* Abuse C99 Compound Literal's lifetime */ +#define base36enc(value) (base36enc_to((value), (char[base36bufsize]){0})) extern long unsigned int base36dec(const char *text); extern uint32 parse_server_version(const char *server_version_str); extern uint32 parse_program_version(const char *program_version); @@ -1223,9 +1273,11 @@ extern int fio_copy_pages(const char *to_fullpath, const char *from_fullpath, pg XLogRecPtr horizonLsn, int calg, int clevel, uint32 checksum_version, bool use_pagemap, BlockNumber *err_blknum, char **errormsg); /* return codes for fio_send_pages */ -extern int fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg); -extern int 
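The base36enc_to()/base36enc() pair above replaces the old strdup-style base36enc_dup(): the macro hands every call its own C99 compound literal (char[base36bufsize]){0}, which stays alive until the end of the enclosing block, so two backup IDs can appear in a single elog() call and nothing ever needs to be freed. Fourteen bytes suffice because a 64-bit value needs at most 13 base36 digits (64·log 2 / log 36 ≈ 12.38) plus the terminator. Below is a standalone sketch of the same trick; the *_demo names are illustrative only.

/* Sketch of the compound-literal buffer trick: each macro expansion gets its
 * own automatic buffer, so two encodings can coexist in one printf/elog call. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BASE36BUFSIZE 14	/* 64*log(2)/log(36) ~= 12.38 -> 13 digits + '\0' */

static const char *
base36enc_to_demo(uint64_t value, char buf[BASE36BUFSIZE])
{
	static const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
	char		tmp[BASE36BUFSIZE];
	int			i = BASE36BUFSIZE - 1;

	tmp[i] = '\0';
	do
	{
		tmp[--i] = digits[value % 36];
		value /= 36;
	} while (value > 0);

	return strcpy(buf, tmp + i);
}

/* Each use gets a fresh anonymous char[14] that lives to the end of the
 * enclosing block -- no shared static buffer to clobber, nothing to free. */
#define base36enc_demo(value) (base36enc_to_demo((value), (char[BASE36BUFSIZE]){0}))

int
main(void)
{
	uint64_t	full_id = 1652110000;	/* arbitrary sample values */
	uint64_t	dest_id = 1652119999;

	/* Two encodings in one call: safe, because each has its own buffer. */
	printf("rename merged full backup %s to %s\n",
		   base36enc_demo(full_id), base36enc_demo(dest_id));

	/* UINT64_MAX needs the full 13 digits. */
	printf("max: %s\n", base36enc_demo(UINT64_MAX));
	return 0;
}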
fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, +extern int fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg); +extern int fio_send_file(const char *from_fullpath, FILE* out, bool cut_zero_tail, pgFile *file, char **errormsg); +extern int fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, + pgFile *file, char **errormsg); extern void fio_list_dir(parray *files, const char *root, bool exclude, bool follow_symlink, bool add_root, bool backup_logs, bool skip_hidden, int external_dir_num); diff --git a/src/ptrack.c b/src/ptrack.c index ebcba1dd4..d27629e45 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -214,7 +214,7 @@ pg_ptrack_get_pagemapset(PGconn *backup_conn, const char *ptrack_schema, pfree(params[0]); if (PQnfields(res) != 2) - elog(ERROR, "cannot get ptrack pagemapset"); + elog(ERROR, "Cannot get ptrack pagemapset"); /* sanity ? */ diff --git a/src/restore.c b/src/restore.c index d8d808a4e..f9310dcee 100644 --- a/src/restore.c +++ b/src/restore.c @@ -3,7 +3,7 @@ * restore.c: restore DB cluster and archived WAL. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -39,6 +39,8 @@ typedef struct int ret; } restore_files_arg; +static bool control_downloaded = false; +static ControlFileData instance_control; static void print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup, @@ -76,12 +78,8 @@ static void set_orphan_status(parray *backups, pgBackup *parent_backup) { /* chain is intact, but at least one parent is invalid */ - char *parent_backup_id; int j; - /* parent_backup_id is a human-readable backup ID */ - parent_backup_id = base36enc_dup(parent_backup->start_time); - for (j = 0; j < parray_num(backups); j++) { @@ -96,19 +94,19 @@ set_orphan_status(parray *backups, pgBackup *parent_backup) elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", - base36enc(backup->start_time), - parent_backup_id, + backup_id_of(backup), + backup_id_of(parent_backup), status2str(parent_backup->status)); } else { elog(WARNING, "Backup %s has parent %s with status: %s", - base36enc(backup->start_time), parent_backup_id, + backup_id_of(backup), + backup_id_of(parent_backup), status2str(parent_backup->status)); } } } - pg_free(parent_backup_id); } /* @@ -135,13 +133,14 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg XLogRecPtr shift_lsn = InvalidXLogRecPtr; if (instanceState == NULL) - elog(ERROR, "required parameter not specified: --instance"); + elog(ERROR, "Required parameter not specified: --instance"); if (params->is_restore) { if (instance_config.pgdata == NULL) - elog(ERROR, - "required parameter not specified: PGDATA (-D, --pgdata)"); + elog(ERROR, "No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\n" + "command line option --pgdata (-D)"); /* Check if restore destination empty */ if (!dir_is_empty(instance_config.pgdata, FIO_DB_HOST)) @@ -153,6 +152,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (params->incremental_mode != INCR_NONE) { DestDirIncrCompatibility rc; + const char *message = NULL; bool ok_to_go = true; elog(INFO, "Running incremental restore into nonempty directory: \"%s\"", @@ -160,12 +160,15 @@ 
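restore.c now keeps a cached instance_control filled through get_control_file_or_back_file() (declared earlier in this diff), and pg_probackup.h defines XLOG_CONTROL_BAK_FILE as XLOG_CONTROL_FILE".pbk.bak". The function's body is not part of these hunks, so the following is only a guess at its intent — read the destination cluster's control file and fall back to the .pbk.bak copy if the primary one is unreadable — with a deliberately stubbed reader and a hypothetical ControlFileDemo type in place of PostgreSQL's ControlFileData.

/* Guessed sketch: read the destination cluster's control file, falling back
 * to the ".pbk.bak" copy if the primary one cannot be opened or read. */
#include <stdio.h>
#include <string.h>

#define XLOG_CONTROL_FILE      "global/pg_control"
#define XLOG_CONTROL_BAK_FILE  XLOG_CONTROL_FILE ".pbk.bak"

/* Stand-in for PostgreSQL's ControlFileData; only what the demo needs. */
typedef struct { unsigned long long system_identifier; } ControlFileDemo;

static int
read_control_file(const char *pgdata, const char *relpath, ControlFileDemo *out)
{
	char		path[4096];
	FILE	   *fp;

	snprintf(path, sizeof(path), "%s/%s", pgdata, relpath);
	fp = fopen(path, "rb");
	if (fp == NULL)
		return 0;
	/* A real reader would parse and CRC-check the whole control file; here we
	 * only grab the leading 8 bytes (the system identifier field). */
	if (fread(&out->system_identifier, sizeof(out->system_identifier), 1, fp) != 1)
	{
		fclose(fp);
		return 0;
	}
	fclose(fp);
	return 1;
}

static int
get_control_or_backup_copy(const char *pgdata, ControlFileDemo *out)
{
	if (read_control_file(pgdata, XLOG_CONTROL_FILE, out))
		return 1;
	fprintf(stderr, "pg_control not readable, trying %s\n", XLOG_CONTROL_BAK_FILE);
	return read_control_file(pgdata, XLOG_CONTROL_BAK_FILE, out);
}

int
main(int argc, char **argv)
{
	ControlFileDemo cf = {0};

	if (argc > 1 && get_control_or_backup_copy(argv[1], &cf))
		printf("system identifier: %llu\n", cf.system_identifier);
	return 0;
}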
do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg rc = check_incremental_compatibility(instance_config.pgdata, instance_config.system_identifier, - params->incremental_mode); + params->incremental_mode, + params->partial_db_list, + params->allow_partial_incremental); if (rc == POSTMASTER_IS_RUNNING) { /* Even with force flag it is unwise to run * incremental restore over running instance */ + message = "Postmaster is running."; ok_to_go = false; } else if (rc == SYSTEM_ID_MISMATCH) @@ -177,7 +180,10 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (params->incremental_mode != INCR_NONE && params->force) cleanup_pgdata = true; else + { + message = "System ID mismatch."; ok_to_go = false; + } } else if (rc == BACKUP_LABEL_EXISTS) { @@ -190,7 +196,10 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg * to calculate switchpoint. */ if (params->incremental_mode == INCR_LSN) + { + message = "Backup label exists. Cannot use incremental restore in LSN mode."; ok_to_go = false; + } } else if (rc == DEST_IS_NOT_OK) { @@ -199,11 +208,16 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg * so we cannot be sure that postmaster is running or not. * It is better to just error out. */ + message = "We cannot be sure about the database state."; + ok_to_go = false; + } else if (rc == PARTIAL_INCREMENTAL_FORBIDDEN) + { + message = "Partial incremental restore into non-empty PGDATA is forbidden."; ok_to_go = false; } if (!ok_to_go) - elog(ERROR, "Incremental restore is not allowed"); + elog(ERROR, "Incremental restore is not allowed: %s", message); } else elog(ERROR, "Restore destination is not empty: \"%s\"", @@ -243,7 +257,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg current_backup->status != BACKUP_STATUS_DONE)) { elog(WARNING, "Skipping backup %s, because it has non-valid status: %s", - base36enc(current_backup->start_time), status2str(current_backup->status)); + backup_id_of(current_backup), status2str(current_backup->status)); continue; } @@ -273,10 +287,10 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg current_backup->status == BACKUP_STATUS_RUNNING) && (!params->no_validate || params->force)) elog(WARNING, "Backup %s has status: %s", - base36enc(current_backup->start_time), status2str(current_backup->status)); + backup_id_of(current_backup), status2str(current_backup->status)); else elog(ERROR, "Backup %s has status: %s", - base36enc(current_backup->start_time), status2str(current_backup->status)); + backup_id_of(current_backup), status2str(current_backup->status)); } if (rt->target_tli) @@ -294,7 +308,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (!satisfy_timeline(timelines, current_backup->tli, current_backup->stop_lsn)) { if (target_backup_id != INVALID_BACKUP_ID) - elog(ERROR, "target backup %s does not satisfy target timeline", + elog(ERROR, "Target backup %s does not satisfy target timeline", base36enc(target_backup_id)); else /* Try to find another backup that satisfies target timeline */ @@ -348,11 +362,11 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg /* chain is broken, determine missing backup ID * and orphinize all his descendants */ - char *missing_backup_id; + const char *missing_backup_id; time_t missing_backup_start_time; missing_backup_start_time = tmp_backup->parent_backup; - missing_backup_id = 
base36enc_dup(tmp_backup->parent_backup); + missing_backup_id = base36enc(tmp_backup->parent_backup); for (j = 0; j < parray_num(backups); j++) { @@ -369,18 +383,17 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg write_backup_status(backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s is missing", - base36enc(backup->start_time), missing_backup_id); + backup_id_of(backup), missing_backup_id); } else { elog(WARNING, "Backup %s has missing parent %s", - base36enc(backup->start_time), missing_backup_id); + backup_id_of(backup), missing_backup_id); } } } - pg_free(missing_backup_id); /* No point in doing futher */ - elog(ERROR, "%s of backup %s failed.", action, base36enc(dest_backup->start_time)); + elog(ERROR, "%s of backup %s failed.", action, backup_id_of(dest_backup)); } else if (result == ChainIsInvalid) { @@ -391,7 +404,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg /* sanity */ if (!tmp_backup) elog(ERROR, "Parent full backup for the given backup %s was not found", - base36enc(dest_backup->start_time)); + backup_id_of(dest_backup)); } /* We have found full backup */ @@ -490,6 +503,9 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (redo.checksum_version == 0) elog(ERROR, "Incremental restore in 'lsn' mode require " "data_checksums to be enabled in destination data directory"); + if (!control_downloaded) + get_control_file_or_back_file(instance_config.pgdata, FIO_DB_HOST, + &instance_control); timelines = read_timeline_history(instanceState->instance_wal_subdir_path, redo.tli, false); @@ -511,7 +527,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (redo.tli == tmp_backup->tli) { elog(INFO, "Backup %s is chosen as shiftpoint, its Stop LSN will be used as shift LSN", - base36enc(tmp_backup->start_time)); + backup_id_of(tmp_backup)); shift_lsn = tmp_backup->stop_lsn; break; @@ -535,7 +551,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg else elog(INFO, "Backup %s cannot be a shiftpoint, " "because its tli %i is not in history of redo timeline %i", - base36enc(tmp_backup->start_time), tmp_backup->tli, redo.tli); + backup_id_of(tmp_backup), tmp_backup->tli, redo.tli); } tmp_backup = tmp_backup->parent_backup_link; @@ -544,13 +560,13 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (XLogRecPtrIsInvalid(shift_lsn)) elog(ERROR, "Cannot perform incremental restore of backup chain %s in 'lsn' mode, " "because destination directory redo point %X/%X on tli %i is out of reach", - base36enc(dest_backup->start_time), + backup_id_of(dest_backup), (uint32) (redo.lsn >> 32), (uint32) redo.lsn, redo.tli); else elog(INFO, "Destination directory redo point %X/%X on tli %i is " "within reach of backup %s with Stop LSN %X/%X on tli %i", (uint32) (redo.lsn >> 32), (uint32) redo.lsn, redo.tli, - base36enc(tmp_backup->start_time), + backup_id_of(tmp_backup), (uint32) (tmp_backup->stop_lsn >> 32), (uint32) tmp_backup->stop_lsn, tmp_backup->tli); @@ -564,7 +580,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (!params->is_restore || !params->no_validate) { if (dest_backup->backup_mode != BACKUP_MODE_FULL) - elog(INFO, "Validating parents for backup %s", base36enc(dest_backup->start_time)); + elog(INFO, "Validating parents for backup %s", backup_id_of(dest_backup)); /* * Validate backups from base_full_backup 
to dest_backup. @@ -577,7 +593,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (!lock_backup(tmp_backup, true, false)) { elog(ERROR, "Cannot lock backup %s directory", - base36enc(tmp_backup->start_time)); + backup_id_of(tmp_backup)); } /* validate datafiles only */ @@ -624,27 +640,27 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg dest_backup->status == BACKUP_STATUS_DONE) { if (params->no_validate) - elog(WARNING, "Backup %s is used without validation.", base36enc(dest_backup->start_time)); + elog(WARNING, "Backup %s is used without validation.", backup_id_of(dest_backup)); else - elog(INFO, "Backup %s is valid.", base36enc(dest_backup->start_time)); + elog(INFO, "Backup %s is valid.", backup_id_of(dest_backup)); } else if (dest_backup->status == BACKUP_STATUS_CORRUPT) { if (params->force) - elog(WARNING, "Backup %s is corrupt.", base36enc(dest_backup->start_time)); + elog(WARNING, "Backup %s is corrupt.", backup_id_of(dest_backup)); else - elog(ERROR, "Backup %s is corrupt.", base36enc(dest_backup->start_time)); + elog(ERROR, "Backup %s is corrupt.", backup_id_of(dest_backup)); } else if (dest_backup->status == BACKUP_STATUS_ORPHAN) { if (params->force) - elog(WARNING, "Backup %s is orphan.", base36enc(dest_backup->start_time)); + elog(WARNING, "Backup %s is orphan.", backup_id_of(dest_backup)); else - elog(ERROR, "Backup %s is orphan.", base36enc(dest_backup->start_time)); + elog(ERROR, "Backup %s is orphan.", backup_id_of(dest_backup)); } else elog(ERROR, "Backup %s has status: %s", - base36enc(dest_backup->start_time), status2str(dest_backup->status)); + backup_id_of(dest_backup), status2str(dest_backup->status)); /* We ensured that all backups are valid, now restore if required */ @@ -668,9 +684,14 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg if (rt->lsn_string && parse_server_version(dest_backup->server_version) < 100000) elog(ERROR, "Backup %s was created for version %s which doesn't support recovery_target_lsn", - base36enc(dest_backup->start_time), + backup_id_of(dest_backup), dest_backup->server_version); + if (instance_config.remote.host) + elog(INFO, "Restoring the database from backup %s on %s", backup_id_of(dest_backup), instance_config.remote.host); + else + elog(INFO, "Restoring the database from backup %s", backup_id_of(dest_backup)); + restore_chain(dest_backup, parent_chain, dbOid_exclude_list, params, instance_config.pgdata, no_sync, cleanup_pgdata, backup_has_tblspc); @@ -683,7 +704,7 @@ do_restore_or_validate(InstanceState *instanceState, time_t target_backup_id, pg fio_disconnect(); elog(INFO, "%s of backup %s completed.", - action, base36enc(dest_backup->start_time)); + action, backup_id_of(dest_backup)); /* cleanup */ parray_walk(backups, pgBackupFree); @@ -704,10 +725,13 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, bool backup_has_tblspc) { int i; - char timestamp[100]; parray *pgdata_files = NULL; parray *dest_files = NULL; parray *external_dirs = NULL; + pgFile *dest_pg_control_file = NULL; + char dest_pg_control_fullpath[MAXPGPATH]; + char dest_pg_control_bak_fullpath[MAXPGPATH]; + /* arrays with meta info for multi threaded backup */ pthread_t *threads; restore_files_arg *threads_args; @@ -723,9 +747,6 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, time_t start_time, end_time; /* Preparations for actual restoring */ - time2iso(timestamp, lengthof(timestamp), dest_backup->start_time, false); - elog(INFO, 
"Restoring the database from backup at %s", timestamp); - dest_files = get_backup_filelist(dest_backup, true); /* Lock backup chain and make sanity checks */ @@ -734,17 +755,17 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, pgBackup *backup = (pgBackup *) parray_get(parent_chain, i); if (!lock_backup(backup, true, false)) - elog(ERROR, "Cannot lock backup %s", base36enc(backup->start_time)); + elog(ERROR, "Cannot lock backup %s", backup_id_of(backup)); if (backup->status != BACKUP_STATUS_OK && backup->status != BACKUP_STATUS_DONE) { if (params->force) elog(WARNING, "Backup %s is not valid, restore is forced", - base36enc(backup->start_time)); + backup_id_of(backup)); else elog(ERROR, "Backup %s cannot be restored because it is not valid", - base36enc(backup->start_time)); + backup_id_of(backup)); } /* confirm block size compatibility */ @@ -781,7 +802,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, use_bitmap = false; if (params->incremental_mode != INCR_NONE) - elog(ERROR, "incremental restore is not possible for backups older than 2.3.0 version"); + elog(ERROR, "Incremental restore is not possible for backups older than 2.3.0 version"); } /* There is no point in bitmap restore, when restoring a single FULL backup, @@ -801,7 +822,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, create_data_directories(dest_files, instance_config.pgdata, dest_backup->root_dir, backup_has_tblspc, params->incremental_mode != INCR_NONE, - FIO_DB_HOST); + FIO_DB_HOST, params->waldir); /* * Restore dest_backup external directories. @@ -843,7 +864,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, external_path = parray_get(external_dirs, file->external_dir_num - 1); join_path_components(dirpath, external_path, file->rel_path); - elog(VERBOSE, "Create external directory \"%s\"", dirpath); + elog(LOG, "Create external directory \"%s\"", dirpath); fio_mkdir(dirpath, file->mode, FIO_DB_HOST); } } @@ -911,6 +932,11 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, pg_strcasecmp(file->name, RELMAPPER_FILENAME) == 0) redundant = true; + /* global/pg_control.pbk.bak are always keeped, because it's needed for restart failed incremental restore */ + if (file->external_dir_num == 0 && + pg_strcasecmp(file->rel_path, XLOG_CONTROL_BAK_FILE) == 0) + redundant = false; + /* do not delete the useful internal directories */ if (S_ISDIR(file->mode) && !redundant) continue; @@ -923,7 +949,7 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, join_path_components(fullpath, pgdata_path, file->rel_path); fio_delete(file->mode, fullpath, FIO_DB_HOST); - elog(VERBOSE, "Deleted file \"%s\"", fullpath); + elog(LOG, "Deleted file \"%s\"", fullpath); /* shrink pgdata list */ pgFileFree(file); @@ -963,6 +989,42 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, dest_bytes = dest_backup->pgdata_bytes; pretty_size(dest_bytes, pretty_dest_bytes, lengthof(pretty_dest_bytes)); + /* + * [Issue #313] + * find pg_control file (in already sorted earlier dest_files, see parray_qsort(backup->files...)) + * and exclude it from list for future special processing + */ + { + int control_file_elem_index; + pgFile search_key; + MemSet(&search_key, 0, sizeof(pgFile)); + /* pgFileCompareRelPathWithExternal uses only .rel_path and .external_dir_num for comparision */ + search_key.rel_path = XLOG_CONTROL_FILE; + search_key.external_dir_num = 0; + control_file_elem_index = parray_bsearch_index(dest_files, &search_key, pgFileCompareRelPathWithExternal); + + if 
(control_file_elem_index < 0) + elog(ERROR, "File \"%s\" not found in backup %s", XLOG_CONTROL_FILE, base36enc(dest_backup->start_time)); + dest_pg_control_file = (pgFile *) parray_get(dest_files, control_file_elem_index); + parray_remove(dest_files, control_file_elem_index); + + join_path_components(dest_pg_control_fullpath, pgdata_path, XLOG_CONTROL_FILE); + join_path_components(dest_pg_control_bak_fullpath, pgdata_path, XLOG_CONTROL_BAK_FILE); + /* + * rename (if it exist) dest control file before restoring + * if it doesn't exist, that mean, that we already restoring in a previously failed + * pgdata, where XLOG_CONTROL_BAK_FILE exist + */ + if (params->incremental_mode != INCR_NONE) + { + if (fio_access(dest_pg_control_fullpath,F_OK,FIO_DB_HOST) == 0){ + if (fio_rename(dest_pg_control_fullpath, dest_pg_control_bak_fullpath, FIO_DB_HOST) < 0) + elog(WARNING, "Cannot rename file \"%s\" to \"%s\": %s", + dest_pg_control_fullpath, dest_pg_control_bak_fullpath, strerror(errno)); + } + } + } + elog(INFO, "Start restoring backup files. PGDATA size: %s", pretty_dest_bytes); time(&start_time); thread_interrupted = false; @@ -1003,6 +1065,32 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, total_bytes += threads_args[i].restored_bytes; } + /* [Issue #313] copy pg_control at very end */ + if (restore_isok) + { + FILE *out = NULL; + elog(progress ? INFO : LOG, "Progress: Restore file \"%s\"", + dest_pg_control_file->rel_path); + + out = fio_fopen(dest_pg_control_fullpath, PG_BINARY_R "+", FIO_DB_HOST); + + total_bytes += restore_non_data_file(parent_chain, + dest_backup, + dest_pg_control_file, + out, + dest_pg_control_fullpath, false); + fio_fclose(out); + /* Now backup control file can be deleted */ + if (params->incremental_mode != INCR_NONE) + { + pgFile *dst_control; + dst_control = pgFileNew(dest_pg_control_bak_fullpath, XLOG_CONTROL_BAK_FILE, + true,0, FIO_BACKUP_HOST); + fio_delete(dst_control->mode, dest_pg_control_bak_fullpath, FIO_LOCAL_HOST); + pgFileFree(dst_control); + } + } + time(&end_time); pretty_time_interval(difftime(end_time, start_time), pretty_time, lengthof(pretty_time)); @@ -1087,6 +1175,8 @@ restore_chain(pgBackup *dest_backup, parray *parent_chain, parray_free(pgdata_files); } + if(dest_pg_control_file) pgFileFree(dest_pg_control_file); + for (i = parray_num(parent_chain) - 1; i >= 0; i--) { pgBackup *backup = (pgBackup *) parray_get(parent_chain, i); @@ -1131,9 +1221,8 @@ restore_files(void *arg) if (interrupted || thread_interrupted) elog(ERROR, "Interrupted during restore"); - if (progress) - elog(INFO, "Progress: (%d/%lu). Restore file \"%s\"", - i + 1, n_files, dest_file->rel_path); + elog(progress ? INFO : LOG, "Progress: (%d/%lu). 
Restore file \"%s\"", + i + 1, n_files, dest_file->rel_path); /* Only files from pgdata can be skipped by partial restore */ if (arguments->dbOid_exclude_list && dest_file->external_dir_num == 0) @@ -1149,7 +1238,7 @@ restore_files(void *arg) create_empty_file(FIO_BACKUP_HOST, arguments->to_root, FIO_DB_HOST, dest_file); - elog(VERBOSE, "Skip file due to partial restore: \"%s\"", + elog(LOG, "Skip file due to partial restore: \"%s\"", dest_file->rel_path); continue; } @@ -1159,7 +1248,7 @@ restore_files(void *arg) if ((dest_file->external_dir_num == 0) && strcmp(PG_TABLESPACE_MAP_FILE, dest_file->rel_path) == 0) { - elog(VERBOSE, "Skip tablespace_map"); + elog(LOG, "Skip tablespace_map"); continue; } @@ -1167,7 +1256,7 @@ restore_files(void *arg) if ((dest_file->external_dir_num == 0) && strcmp(DATABASE_MAP, dest_file->rel_path) == 0) { - elog(VERBOSE, "Skip database_map"); + elog(LOG, "Skip database_map"); continue; } @@ -1239,9 +1328,9 @@ restore_files(void *arg) strerror(errno)); if (!dest_file->is_datafile || dest_file->is_cfs) - elog(VERBOSE, "Restoring nonedata file: \"%s\"", to_fullpath); + elog(LOG, "Restoring non-data file: \"%s\"", to_fullpath); else - elog(VERBOSE, "Restoring data file: \"%s\"", to_fullpath); + elog(LOG, "Restoring data file: \"%s\"", to_fullpath); // If destination file is 0 sized, then just close it and go for the next if (dest_file->write_size == 0) @@ -1261,10 +1350,10 @@ restore_files(void *arg) } else { - /* disable stdio buffering for local destination nonedata file */ + /* disable stdio buffering for local destination non-data file */ if (!fio_is_remote_file(out)) setvbuf(out, NULL, _IONBF, BUFSIZ); - /* Destination file is nonedata file */ + /* Destination file is non-data file */ arguments->restored_bytes += restore_non_data_file(arguments->parent_chain, arguments->dest_backup, dest_file, out, to_fullpath, already_exists); @@ -1322,8 +1411,10 @@ create_recovery_conf(InstanceState *instanceState, time_t backup_id, } /* restore-target='latest' support */ - target_latest = rt->target_stop != NULL && - strcmp(rt->target_stop, "latest") == 0; + target_latest = (rt->target_tli_string != NULL && + strcmp(rt->target_tli_string, "latest") == 0) || + (rt->target_stop != NULL && + strcmp(rt->target_stop, "latest") == 0); target_immediate = rt->target_stop != NULL && strcmp(rt->target_stop, "immediate") == 0; @@ -1349,6 +1440,13 @@ create_recovery_conf(InstanceState *instanceState, time_t backup_id, rt->xid_string || rt->lsn_string || rt->target_name || target_immediate || target_latest || restore_command_provided) params->recovery_settings_mode = PITR_REQUESTED; + /* + * The recovery-target-timeline option can be 'latest' for streaming backups. + * This operation requires a WAL archive for PITR. + */ + if (rt->target_tli && backup->stream && params->recovery_settings_mode != PITR_REQUESTED) + elog(WARNING, "The '--recovery-target-timeline' option applied for STREAM backup. 
" + "The timeline number will be ignored."); elog(LOG, "----------------------------------------"); @@ -1428,14 +1526,20 @@ print_recovery_settings(InstanceState *instanceState, FILE *fp, pgBackup *backup fio_fprintf(fp, "recovery_target_timeline = '%u'\n", rt->target_tli); else { + if (rt->target_tli_string) + fio_fprintf(fp, "recovery_target_timeline = '%s'\n", rt->target_tli_string); + else if (rt->target_stop && (strcmp(rt->target_stop, "latest") == 0)) + fio_fprintf(fp, "recovery_target_timeline = 'latest'\n"); #if PG_VERSION_NUM >= 120000 - + else + { /* * In PG12 default recovery target timeline was changed to 'latest', which * is extremely risky. Explicitly preserve old behavior of recovering to current * timneline for PG12. */ fio_fprintf(fp, "recovery_target_timeline = 'current'\n"); + } #endif } @@ -1485,7 +1589,7 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu fp = fio_fopen(path, "w", FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if (fio_chmod(path, FILE_PERMISSION, FIO_DB_HOST) == -1) @@ -1505,7 +1609,7 @@ update_recovery_options_before_v12(InstanceState *instanceState, pgBackup *backu if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } #endif @@ -1544,7 +1648,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { /* file not found is not an error case */ if (errno != ENOENT) - elog(ERROR, "cannot stat file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot stat file \"%s\": %s", postgres_auto_path, strerror(errno)); st.st_size = 0; } @@ -1554,13 +1658,13 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { fp = fio_open_stream(postgres_auto_path, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", postgres_auto_path, strerror(errno)); } sprintf(postgres_auto_path_tmp, "%s.tmp", postgres_auto_path); fp_tmp = fio_fopen(postgres_auto_path_tmp, "w", FIO_DB_HOST); if (fp_tmp == NULL) - elog(ERROR, "cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); + elog(ERROR, "Cannot open \"%s\": %s", postgres_auto_path_tmp, strerror(errno)); while (fp && fgets(line, lengthof(line), fp)) { @@ -1618,11 +1722,11 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, { fp = fio_fopen(postgres_auto_path, "a", FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot open file \"%s\": %s", postgres_auto_path, strerror(errno)); fio_fprintf(fp, "\n# recovery settings added by pg_probackup restore of backup %s at '%s'\n", - base36enc(backup->start_time), current_time_str); + backup_id_of(backup), current_time_str); if (params->recovery_settings_mode == PITR_REQUESTED) print_recovery_settings(instanceState, fp, backup, params, rt); @@ -1632,7 +1736,7 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", postgres_auto_path, + elog(ERROR, "Cannot write file \"%s\": %s", postgres_auto_path, strerror(errno)); /* @@ -1652,12 +1756,12 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": 
%s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } @@ -1668,12 +1772,12 @@ update_recovery_options(InstanceState *instanceState, pgBackup *backup, fp = fio_fopen(path, PG_BINARY_W, FIO_DB_HOST); if (fp == NULL) - elog(ERROR, "cannot open file \"%s\": %s", path, + elog(ERROR, "Cannot open file \"%s\": %s", path, strerror(errno)); if (fio_fflush(fp) != 0 || fio_fclose(fp)) - elog(ERROR, "cannot write file \"%s\": %s", path, + elog(ERROR, "Cannot write file \"%s\": %s", path, strerror(errno)); } } @@ -1710,12 +1814,12 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict if (fd == NULL) { if (errno != ENOENT) - elog(ERROR, "could not open file \"%s\": %s", path, + elog(ERROR, "Could not open file \"%s\": %s", path, strerror(errno)); /* There is no history file for target timeline */ if (strict) - elog(ERROR, "recovery target timeline %u does not exist", + elog(ERROR, "Recovery target timeline %u does not exist", targetTLI); else return NULL; @@ -1749,12 +1853,12 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict { /* expect a numeric timeline ID as first field of line */ elog(ERROR, - "syntax error in history file: %s. Expected a numeric timeline ID.", + "Syntax error in history file: %s. Expected a numeric timeline ID.", fline); } if (nfields != 3) elog(ERROR, - "syntax error in history file: %s. Expected a transaction log switchpoint location.", + "Syntax error in history file: %s. Expected a transaction log switchpoint location.", fline); if (last_timeline && tli <= last_timeline->tli) @@ -1773,7 +1877,7 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict } if (fd && (ferror(fd))) - elog(ERROR, "Failed to read from file: \"%s\"", path); + elog(ERROR, "Failed to read from file: \"%s\"", path); if (fd) fclose(fd); @@ -1867,7 +1971,7 @@ pgRecoveryTarget * parseRecoveryTargetOptions(const char *target_time, const char *target_xid, const char *target_inclusive, - TimeLineID target_tli, + const char *target_tli_string, const char *target_lsn, const char *target_stop, const char *target_name, @@ -1940,7 +2044,20 @@ parseRecoveryTargetOptions(const char *target_time, target_inclusive); } - rt->target_tli = target_tli; + rt->target_tli_string = target_tli_string; + rt->target_tli = 0; + /* target_tli can contains timeline number, "current" or "latest" */ + if(target_tli_string && strcmp(target_tli_string, "current") != 0 && strcmp(target_tli_string, "latest") != 0) + { + errno = 0; + rt->target_tli = strtoul(target_tli_string, NULL, 10); + if (errno == EINVAL || errno == ERANGE || !rt->target_tli) + { + elog(ERROR, "Invalid value for '--recovery-target-timeline' option '%s'", + target_tli_string); + } + } + if (target_stop) { if ((strcmp(target_stop, "immediate") != 0) @@ -2032,7 +2149,7 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, if (!database_map_file) elog(ERROR, "Backup %s doesn't contain a database_map, partial restore is impossible.", - base36enc(backup->start_time)); + backup_id_of(backup)); join_path_components(path, backup->root_dir, DATABASE_DIR); join_path_components(database_map_path, path, DATABASE_MAP); @@ -2050,7 +2167,7 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, /* partial restore requested but database_map is missing */ if (!database_map) elog(ERROR, 
"Backup %s has empty or mangled database_map, partial restore is impossible.", - base36enc(backup->start_time)); + backup_id_of(backup)); /* * So we have a list of datnames and a database_map for it. @@ -2080,7 +2197,7 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, /* If specified datname is not found in database_map, error out */ if (!found_match) elog(ERROR, "Failed to find a database '%s' in database_map of backup %s", - datname, base36enc(backup->start_time)); + datname, backup_id_of(backup)); } /* At this moment only databases to exclude are left in the map */ @@ -2118,14 +2235,14 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, /* If specified datname is not found in database_map, error out */ if (!found_match) elog(ERROR, "Failed to find a database '%s' in database_map of backup %s", - datname, base36enc(backup->start_time)); + datname, backup_id_of(backup)); } } /* extra sanity: ensure that list is not empty */ if (!dbOid_exclude_list || parray_num(dbOid_exclude_list) < 1) elog(ERROR, "Failed to find a match in database_map of backup %s for partial restore", - base36enc(backup->start_time)); + backup_id_of(backup)); /* clean backup filelist */ if (files) @@ -2147,7 +2264,9 @@ get_dbOid_exclude_list(pgBackup *backup, parray *datname_list, */ DestDirIncrCompatibility check_incremental_compatibility(const char *pgdata, uint64 system_identifier, - IncrRestoreMode incremental_mode) + IncrRestoreMode incremental_mode, + parray *partial_db_list, + bool allow_partial_incremental) { uint64 system_id_pgdata; bool system_id_match = false; @@ -2188,9 +2307,12 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, * data files content, because based on pg_control information we will * choose a backup suitable for lsn based incremental restore. */ - elog(INFO, "Trying to read pg_control file in destination directory"); + elog(LOG, "Trying to read pg_control file in destination directory"); + + get_control_file_or_back_file(pgdata, FIO_DB_HOST, &instance_control); + control_downloaded = true; - system_id_pgdata = get_system_identifier(pgdata, FIO_DB_HOST, false); + system_id_pgdata = instance_control.system_identifier; if (system_id_pgdata == instance_config.system_identifier) system_id_match = true; @@ -2231,6 +2353,8 @@ check_incremental_compatibility(const char *pgdata, uint64 system_identifier, if (backup_label_exists) return BACKUP_LABEL_EXISTS; + if (partial_db_list && !allow_partial_incremental) + return PARTIAL_INCREMENTAL_FORBIDDEN; /* some other error condition */ if (!success) return DEST_IS_NOT_OK; diff --git a/src/show.c b/src/show.c index 22c40cf43..810262df6 100644 --- a/src/show.c +++ b/src/show.c @@ -3,7 +3,7 @@ * show.c: show backup information. 
* * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -12,6 +12,7 @@ #include #include +#include #include #include "utils/json.h" @@ -66,11 +67,49 @@ static void show_archive_plain(const char *instance_name, uint32 xlog_seg_size, parray *timelines_list, bool show_name); static void show_archive_json(const char *instance_name, uint32 xlog_seg_size, parray *tli_list); +static bool backup_has_tablespace_map(pgBackup *backup); static PQExpBufferData show_buf; static bool first_instance = true; static int32 json_level = 0; +static const char* lc_env_locale; +typedef enum { + LOCALE_C, // Used for formatting output to unify the dot-based floating point representation + LOCALE_ENV // Default environment locale +} output_numeric_locale; + +#ifdef HAVE_USELOCALE +static locale_t env_locale, c_locale; +#endif +void memorize_environment_locale() { + lc_env_locale = (const char *)getenv("LC_NUMERIC"); + lc_env_locale = lc_env_locale != NULL ? lc_env_locale : "C"; +#ifdef HAVE_USELOCALE + env_locale = newlocale(LC_NUMERIC_MASK, lc_env_locale, (locale_t)0); + c_locale = newlocale(LC_NUMERIC_MASK, "C", (locale_t)0); +#else +#ifdef HAVE__CONFIGTHREADLOCALE + _configthreadlocale(_ENABLE_PER_THREAD_LOCALE); +#endif +#endif +} + +void free_environment_locale() { +#ifdef HAVE_USELOCALE + freelocale(env_locale); + freelocale(c_locale); +#endif +} + +static void set_output_numeric_locale(output_numeric_locale loc) { +#ifdef HAVE_USELOCALE + uselocale(loc == LOCALE_C ? c_locale : env_locale); +#else + setlocale(LC_NUMERIC, loc == LOCALE_C ? "C" : lc_env_locale); +#endif +} + /* * Entry point of pg_probackup SHOW subcommand. 
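The locale helpers added above keep plain-text tables in the user's LC_NUMERIC while machine-readable output stays dot-based, by switching per-thread locales with uselocale() where available. A minimal standalone sketch of the same pattern, with illustrative names, not the pg_probackup helpers themselves:

/* sketch: remember the environment LC_NUMERIC once, then switch between it
 * and the "C" locale around output (POSIX.1-2008 newlocale/uselocale) */
#define _POSIX_C_SOURCE 200809L
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>

static locale_t env_loc, c_loc;

static void locales_init(void)
{
	const char *lc = getenv("LC_NUMERIC");

	env_loc = newlocale(LC_NUMERIC_MASK, lc ? lc : "C", (locale_t) 0);
	c_loc   = newlocale(LC_NUMERIC_MASK, "C", (locale_t) 0);
}

int main(void)
{
	locales_init();
	uselocale(env_loc);            /* user's locale, e.g. for plain tables */
	printf("%.2f\n", 3.14);
	uselocale(c_loc);              /* dot-based output, e.g. for JSON */
	printf("%.2f\n", 3.14);
	freelocale(env_loc);
	freelocale(c_loc);
	return 0;
}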
*/ @@ -99,7 +138,7 @@ do_show(CatalogState *catalogState, InstanceState *instanceState, show_instance_start(); for (i = 0; i < parray_num(instances); i++) { - InstanceState *instanceState = parray_get(instances, i); + instanceState = parray_get(instances, i); if (interrupted) elog(ERROR, "Interrupted during show"); @@ -164,22 +203,22 @@ pretty_size(int64 size, char *buf, size_t len) return; } - if (Abs(size) < limit) + if (size < limit) snprintf(buf, len, "%dB", (int) size); else { size >>= 9; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dkB", (int) half_rounded(size)); else { size >>= 10; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dMB", (int) half_rounded(size)); else { size >>= 10; - if (Abs(size) < limit2) + if (size < limit2) snprintf(buf, len, "%dGB", (int) half_rounded(size)); else { @@ -315,7 +354,7 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) json_add(buf, JT_BEGIN_OBJECT, &json_level); - json_add_value(buf, "id", base36enc(backup->start_time), json_level, + json_add_value(buf, "id", backup_id_of(backup), json_level, true); if (backup->parent_backup != 0) @@ -414,7 +453,7 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) appendPQExpBuffer(buf, INT64_FORMAT, backup->uncompressed_bytes); } - if (backup->uncompressed_bytes >= 0) + if (backup->pgdata_bytes >= 0) { json_add_key(buf, "pgdata-bytes", json_level); appendPQExpBuffer(buf, INT64_FORMAT, backup->pgdata_bytes); @@ -441,6 +480,32 @@ print_backup_json_object(PQExpBuffer buf, pgBackup *backup) appendPQExpBuffer(buf, "%u", backup->content_crc); } + /* print tablespaces list */ + if (backup_has_tablespace_map(backup)) + { + parray *links = parray_new(); + + json_add_key(buf, "tablespace_map", json_level); + json_add(buf, JT_BEGIN_ARRAY, &json_level); + + read_tablespace_map(links, backup->root_dir); + parray_qsort(links, pgFileCompareLinked); + + for (size_t i = 0; i < parray_num(links); i++){ + pgFile *link = (pgFile *) parray_get(links, i); + if (i) + appendPQExpBufferChar(buf, ','); + json_add(buf, JT_BEGIN_OBJECT, &json_level); + json_add_value(buf, "oid", link->name, json_level, true); + json_add_value(buf, "path", link->linked, json_level, true); + json_add(buf, JT_END_OBJECT, &json_level); + } + /* End of tablespaces */ + json_add(buf, JT_END_ARRAY, &json_level); + parray_walk(links, pgFileFree); + parray_free(links); + } + json_add(buf, JT_END_OBJECT, &json_level); } @@ -476,12 +541,34 @@ show_backup(InstanceState *instanceState, time_t requested_backup_id) elog(INFO, "Requested backup \"%s\" is not found.", /* We do not need free base36enc's result, we exit anyway */ base36enc(requested_backup_id)); + parray_walk(backups, pgBackupFree); + parray_free(backups); /* This is not error */ return 0; } if (show_format == SHOW_PLAIN) + { pgBackupWriteControl(stdout, backup, false); + + /* print tablespaces list */ + if (backup_has_tablespace_map(backup)) + { + parray *links = parray_new(); + + fio_fprintf(stdout, "\ntablespace_map = '"); + + read_tablespace_map(links, backup->root_dir); + parray_qsort(links, pgFileCompareLinked); + + for (size_t i = 0; i < parray_num(links); i++){ + pgFile *link = (pgFile *) parray_get(links, i); + fio_fprintf(stdout, "%s %s%s", link->name, link->linked, (i < parray_num(links) - 1) ? 
"; " : "'\n"); + } + parray_walk(links, pgFileFree); + parray_free(links); + } + } else elog(ERROR, "Invalid show format %d", (int) show_format); @@ -513,6 +600,9 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na ShowBackendRow *rows; TimeLineID parent_tli = 0; + // Since we've been printing a table, set LC_NUMERIC to its default environment value + set_output_numeric_locale(LOCALE_ENV); + for (i = 0; i < SHOW_FIELDS_COUNT; i++) widths[i] = strlen(names[i]); @@ -542,7 +632,7 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na /* ID */ snprintf(row->backup_id, lengthof(row->backup_id), "%s", - base36enc(backup->start_time)); + backup_id_of(backup)); widths[cur] = Max(widths[cur], strlen(row->backup_id)); cur++; @@ -726,6 +816,8 @@ show_instance_plain(const char *instance_name, parray *backup_list, bool show_na } pfree(rows); + // Restore the C locale + set_output_numeric_locale(LOCALE_C); } /* @@ -806,6 +898,9 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size, uint32 widths_sum = 0; ShowArchiveRow *rows; + // Since we've been printing a table, set LC_NUMERIC to its default environment value + set_output_numeric_locale(LOCALE_ENV); + for (i = 0; i < SHOW_ARCHIVE_FIELDS_COUNT; i++) widths[i] = strlen(names[i]); @@ -973,6 +1068,8 @@ show_archive_plain(const char *instance_name, uint32 xlog_seg_size, } pfree(rows); + // Restore the C locale + set_output_numeric_locale(LOCALE_C); //TODO: free timelines } @@ -1045,13 +1142,14 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, appendPQExpBuffer(buf, "%lu", tlinfo->size); json_add_key(buf, "zratio", json_level); + if (tlinfo->size != 0) - zratio = ((float)xlog_seg_size*tlinfo->n_xlog_files) / tlinfo->size; + zratio = ((float) xlog_seg_size * tlinfo->n_xlog_files) / tlinfo->size; appendPQExpBuffer(buf, "%.2f", zratio); if (tlinfo->closest_backup != NULL) snprintf(tmp_buf, lengthof(tmp_buf), "%s", - base36enc(tlinfo->closest_backup->start_time)); + backup_id_of(tlinfo->closest_backup)); else snprintf(tmp_buf, lengthof(tmp_buf), "%s", ""); @@ -1123,3 +1221,10 @@ show_archive_json(const char *instance_name, uint32 xlog_seg_size, first_instance = false; } + +static bool backup_has_tablespace_map(pgBackup *backup) +{ + char map_path[MAXPGPATH]; + join_path_components(map_path, backup->database_dir, PG_TABLESPACE_MAP_FILE); + return fileExists(map_path, FIO_BACKUP_HOST); +} diff --git a/src/stream.c b/src/stream.c index 1ee8dee37..77453e997 100644 --- a/src/stream.c +++ b/src/stream.c @@ -275,11 +275,19 @@ StreamLog(void *arg) ctl.mark_done = false; #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 + ctl.walmethod = CreateWalDirectoryMethod( + stream_arg->basedir, + PG_COMPRESSION_NONE, + 0, + false); +#else /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 150000 */ ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, // (instance_config.compress_alg == NONE_COMPRESS) ? 
0 : instance_config.compress_level, 0, false); +#endif /* PG_VERSION_NUM >= 150000 */ ctl.replication_slot = replication_slot; ctl.stop_socket = PGINVALID_SOCKET; ctl.do_sync = false; /* We sync all files at the end of backup */ @@ -299,7 +307,11 @@ StreamLog(void *arg) } #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 160000 + if (!ctl.walmethod->ops->finish(ctl.walmethod)) +#else if (!ctl.walmethod->finish()) +#endif { interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", @@ -521,7 +533,7 @@ get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backu /* link parent to child */ for (i = 0; i < parray_num(tli_list); i++) { - timelineInfo *tlinfo = (timelineInfo *) parray_get(tli_list, i); + tlinfo = (timelineInfo *) parray_get(tli_list, i); for (j = 0; j < parray_num(tli_list); j++) { @@ -538,7 +550,7 @@ get_history_streaming(ConnectionOptions *conn_opt, TimeLineID tli, parray *backu /* add backups to each timeline info */ for (i = 0; i < parray_num(tli_list); i++) { - timelineInfo *tlinfo = parray_get(tli_list, i); + tlinfo = parray_get(tli_list, i); for (j = 0; j < parray_num(backup_list); j++) { pgBackup *backup = parray_get(backup_list, j); @@ -640,7 +652,7 @@ start_WAL_streaming(PGconn *backup_conn, char *stream_dst_path, ConnectionOption //TODO Add a comment about this calculation stream_stop_timeout = stream_stop_timeout + stream_stop_timeout * 0.1; - strncpy(stream_thread_arg.basedir, stream_dst_path, sizeof(stream_thread_arg.basedir)); + strlcpy(stream_thread_arg.basedir, stream_dst_path, sizeof(stream_thread_arg.basedir)); /* * Connect in replication mode to the server. diff --git a/src/util.c b/src/util.c index fb33fd046..3c0a33453 100644 --- a/src/util.c +++ b/src/util.c @@ -3,7 +3,7 @@ * util.c: log messages to log file or stderr, and misc code. * * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2021, Postgres Professional * *------------------------------------------------------------------------- */ @@ -32,38 +32,22 @@ static const char *statusName[] = }; const char * -base36enc(long unsigned int value) +base36enc_to(long unsigned int value, char buf[ARG_SIZE_HINT base36bufsize]) { const char base36[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - /* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */ - static char buffer[14]; - unsigned int offset = sizeof(buffer); + char buffer[base36bufsize]; + char *p; - buffer[--offset] = '\0'; + p = &buffer[sizeof(buffer)-1]; + *p = '\0'; do { - buffer[--offset] = base36[value % 36]; + *(--p) = base36[value % 36]; } while (value /= 36); - return &buffer[offset]; -} - -/* - * Same as base36enc(), but the result must be released by the user. 
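base36enc_to() above encodes a 64-bit backup ID into a caller-supplied buffer instead of a static one, so callers such as backup_id_of() can cache the string safely. A standalone sketch of the round trip, with illustrative names rather than the pg_probackup API; 14 bytes suffice because log(2^64)/log(36) is about 12.4 digits plus the terminating NUL:

/* sketch: encode a 64-bit value in base 36 and decode it back with strtoull */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BASE36BUFSIZE 14

static char *b36_encode(unsigned long long value, char buf[BASE36BUFSIZE])
{
	static const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
	char *p = buf + BASE36BUFSIZE - 1;

	*p = '\0';
	do {
		*--p = digits[value % 36];
	} while (value /= 36);

	/* shift the result to the start of the buffer for the caller */
	memmove(buf, p, strlen(p) + 1);
	return buf;
}

int main(void)
{
	char buf[BASE36BUFSIZE];
	unsigned long long id = 1652094213ULL;   /* illustrative value */

	printf("%llu -> %s\n", id, b36_encode(id, buf));
	printf("%s -> %llu\n", buf, strtoull(buf, NULL, 36));
	return 0;
}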
- */ -char * -base36enc_dup(long unsigned int value) -{ - const char base36[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - /* log(2**64) / log(36) = 12.38 => max 13 char + '\0' */ - char buffer[14]; - unsigned int offset = sizeof(buffer); + /* I know, it doesn't look safe */ + strncpy(buf, p, base36bufsize); - buffer[--offset] = '\0'; - do { - buffer[--offset] = base36[value % 36]; - } while (value /= 36); - - return strdup(&buffer[offset]); + return buf; } long unsigned int @@ -90,7 +74,7 @@ checkControlFile(ControlFileData *ControlFile) if ((ControlFile->pg_control_version % 65536 == 0 || ControlFile->pg_control_version % 65536 > 10000) && ControlFile->pg_control_version / 65536 != 0) - elog(ERROR, "possible byte ordering mismatch\n" + elog(ERROR, "Possible byte ordering mismatch\n" "The byte ordering used to store the pg_control file might not match the one\n" "used by this program. In that case the results below would be incorrect, and\n" "the PostgreSQL installation would be incompatible with this data directory."); @@ -109,7 +93,7 @@ digestControlFile(ControlFileData *ControlFile, char *src, size_t size) #endif if (size != ControlFileSize) - elog(ERROR, "unexpected control file size %d, expected %d", + elog(ERROR, "Unexpected control file size %d, expected %d", (int) size, ControlFileSize); memcpy(ControlFile, src, sizeof(ControlFileData)); @@ -206,6 +190,26 @@ get_current_timeline_from_control(const char *pgdata_path, fio_location location return ControlFile.checkPointCopy.ThisTimeLineID; } +void +get_control_file_or_back_file(const char *pgdata_path, fio_location location, ControlFileData *control) +{ + char *buffer; + size_t size; + + /* First fetch file... */ + buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, true, location); + + if (!buffer || size == 0){ + /* Error read XLOG_CONTROL_FILE or file is truncated, trying read backup */ + buffer = slurpFile(pgdata_path, XLOG_CONTROL_BAK_FILE, &size, true, location); + if (!buffer) + elog(ERROR, "Could not read %s and %s files\n", XLOG_CONTROL_FILE, XLOG_CONTROL_BAK_FILE); /* Maybe it should be PANIC? */ + } + digestControlFile(control, buffer, size); + pg_free(buffer); +} + + /* * Get last check point record ptr from pg_tonrol. 
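get_control_file_or_back_file() above falls back to the pg_control.pbk.bak copy when pg_control itself is missing or truncated, which is what lets a failed incremental restore be retried. A standalone sketch of that "primary file, then backup copy" read; the file names come from the comments above, and the read helper is illustrative:

/* sketch: read a whole file, falling back to its backup copy when needed */
#include <stdio.h>
#include <stdlib.h>

static char *read_whole_file(const char *path, size_t *size)
{
	FILE *f = fopen(path, "rb");
	char *buf = NULL;
	long  len;

	if (f == NULL)
		return NULL;
	if (fseek(f, 0, SEEK_END) == 0 && (len = ftell(f)) > 0)
	{
		buf = malloc((size_t) len);
		rewind(f);
		if (buf && fread(buf, 1, (size_t) len, f) == (size_t) len)
			*size = (size_t) len;
		else
		{
			free(buf);
			buf = NULL;
		}
	}
	fclose(f);
	return buf;
}

int main(void)
{
	size_t size = 0;
	char  *buf = read_whole_file("global/pg_control", &size);

	if (buf == NULL)
		buf = read_whole_file("global/pg_control.pbk.bak", &size);
	if (buf == NULL)
	{
		fprintf(stderr, "could not read pg_control or its backup copy\n");
		return 1;
	}
	printf("read %zu bytes of control data\n", size);
	free(buf);
	return 0;
}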
*/ @@ -349,22 +353,6 @@ get_pgcontrol_checksum(const char *pgdata_path) return ControlFile.crc; } -DBState -get_system_dbstate(const char *pgdata_path, fio_location location) -{ - ControlFileData ControlFile; - char *buffer; - size_t size; - - buffer = slurpFile(pgdata_path, XLOG_CONTROL_FILE, &size, false, location); - if (buffer == NULL) - return 0; - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.state; -} - void get_redo(const char *pgdata_path, fio_location pgdata_location, RedoParams *redo) { @@ -589,7 +577,27 @@ datapagemap_print_debug(datapagemap_t *map) iter = datapagemap_iterate(map); while (datapagemap_next(iter, &blocknum)) - elog(INFO, " block %u", blocknum); + elog(VERBOSE, " block %u", blocknum); pg_free(iter); } + +const char* +backup_id_of(pgBackup *backup) +{ + /* Change this Assert when backup_id will not be bound to start_time */ + Assert(backup->backup_id == backup->start_time || backup->start_time == 0); + + if (backup->backup_id_encoded[0] == '\x00') + { + base36enc_to(backup->backup_id, backup->backup_id_encoded); + } + return backup->backup_id_encoded; +} + +void +reset_backup_id(pgBackup *backup) +{ + backup->backup_id = INVALID_BACKUP_ID; + memset(backup->backup_id_encoded, 0, sizeof(backup->backup_id_encoded)); +} diff --git a/src/utils/configuration.c b/src/utils/configuration.c index 04bfbbe3b..f049aa1be 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -18,7 +18,11 @@ #include "getopt_long.h" +#ifndef WIN32 +#include +#endif #include +#include #define MAXPG_LSNCOMPONENT 8 @@ -517,17 +521,22 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], optstring = longopts_to_optstring(longopts, cmd_len + len); + opterr = 0; /* Assign named options */ while ((c = getopt_long(argc, argv, optstring, longopts, &optindex)) != -1) { ConfigOption *opt; + if (c == '?') + { + elog(ERROR, "Option '%s' requires an argument. Try \"%s --help\" for more information.", + argv[optind-1], PROGRAM_NAME); + } opt = option_find(c, cmd_options); if (opt == NULL) opt = option_find(c, options); if (opt - && !remote_agent && opt->allowed < SOURCE_CMD && opt->allowed != SOURCE_CMD_STRICT) elog(ERROR, "Option %s cannot be specified in command line", opt->lname); @@ -535,6 +544,9 @@ config_get_opt(int argc, char **argv, ConfigOption cmd_options[], assign_option(opt, optarg, SOURCE_CMD); } + pgut_free(optstring); + pgut_free(longopts); + return optind; } @@ -666,6 +678,8 @@ config_set_opt(ConfigOption options[], void *var, OptionSource source) /* * Return value of the function in the string representation. Result is * allocated string. 
+ * We can set GET_VAL_IN_BASE_UNITS flag in opt->flags + * before call option_get_value() to get option value in default units */ char * option_get_value(ConfigOption *opt) @@ -680,20 +694,33 @@ option_get_value(ConfigOption *opt) */ if (opt->flags & OPTION_UNIT) { - if (opt->type == 'i') - convert_from_base_unit(*((int32 *) opt->var), - opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'i') - convert_from_base_unit(*((int64 *) opt->var), - opt->flags & OPTION_UNIT, &value, &unit); - else if (opt->type == 'u') - convert_from_base_unit_u(*((uint32 *) opt->var), - opt->flags & OPTION_UNIT, &value_u, &unit); - else if (opt->type == 'U') - convert_from_base_unit_u(*((uint64 *) opt->var), - opt->flags & OPTION_UNIT, &value_u, &unit); + if (opt->flags & GET_VAL_IN_BASE_UNITS){ + if (opt->type == 'i') + value = *((int32 *) opt->var); + else if (opt->type == 'I') + value = *((int64 *) opt->var); + else if (opt->type == 'u') + value_u = *((uint32 *) opt->var); + else if (opt->type == 'U') + value_u = *((uint64 *) opt->var); + unit = ""; + } + else + { + if (opt->type == 'i') + convert_from_base_unit(*((int32 *) opt->var), + opt->flags & OPTION_UNIT, &value, &unit); + else if (opt->type == 'I') + convert_from_base_unit(*((int64 *) opt->var), + opt->flags & OPTION_UNIT, &value, &unit); + else if (opt->type == 'u') + convert_from_base_unit_u(*((uint32 *) opt->var), + opt->flags & OPTION_UNIT, &value_u, &unit); + else if (opt->type == 'U') + convert_from_base_unit_u(*((uint64 *) opt->var), + opt->flags & OPTION_UNIT, &value_u, &unit); + } } - /* Get string representation itself */ switch (opt->type) { @@ -1171,7 +1198,8 @@ parse_time(const char *value, time_t *result, bool utc_default) char *local_tz = getenv("TZ"); /* tmp = replace( value, !isalnum, ' ' ) */ - tmp = pgut_malloc(strlen(value) + + 1); + tmp = pgut_malloc(strlen(value) + 1); + if(!tmp) return false; len = 0; fields_num = 1; @@ -1199,7 +1227,10 @@ parse_time(const char *value, time_t *result, bool utc_default) errno = 0; hr = strtol(value + 1, &cp, 10); if ((value + 1) == cp || errno == ERANGE) + { + pfree(tmp); return false; + } /* explicit delimiter? */ if (*cp == ':') @@ -1207,13 +1238,19 @@ parse_time(const char *value, time_t *result, bool utc_default) errno = 0; min = strtol(cp + 1, &cp, 10); if (errno == ERANGE) + { + pfree(tmp); return false; + } if (*cp == ':') { errno = 0; sec = strtol(cp + 1, &cp, 10); if (errno == ERANGE) + { + pfree(tmp); return false; + } } } /* otherwise, might have run things together... 
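The GET_VAL_IN_BASE_UNITS flag documented above lets a caller ask option_get_value() for the raw base-unit value instead of the converted, human-friendly one. A much-simplified sketch of that flag-controlled getter; the struct layout and units here are illustrative, not the real ConfigOption:

/* sketch: one getter, two representations selected by a bit flag */
#include <stdio.h>

#define OPTION_UNIT_S          0x1000u      /* stored value is in seconds */
#define GET_VAL_IN_BASE_UNITS  0x80000000u  /* return it unconverted */

typedef struct { unsigned flags; int seconds; } opt_t;

static void print_value(const opt_t *opt)
{
	if (opt->flags & GET_VAL_IN_BASE_UNITS)
		printf("%ds\n", opt->seconds);          /* raw base units */
	else if (opt->seconds % 60 == 0)
		printf("%dmin\n", opt->seconds / 60);   /* human-friendly unit */
	else
		printf("%ds\n", opt->seconds);
}

int main(void)
{
	opt_t retention = { OPTION_UNIT_S, 300 };

	print_value(&retention);                    /* prints "5min" */
	retention.flags |= GET_VAL_IN_BASE_UNITS;
	print_value(&retention);                    /* prints "300s" */
	return 0;
}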
*/ @@ -1228,11 +1265,20 @@ parse_time(const char *value, time_t *result, bool utc_default) /* Range-check the values; see notes in datatype/timestamp.h */ if (hr < 0 || hr > MAX_TZDISP_HOUR) + { + pfree(tmp); return false; + } if (min < 0 || min >= MINS_PER_HOUR) + { + pfree(tmp); return false; + } if (sec < 0 || sec >= SECS_PER_MINUTE) + { + pfree(tmp); return false; + } tz = (hr * MINS_PER_HOUR + min) * SECS_PER_MINUTE + sec; if (*value == '-') @@ -1245,7 +1291,10 @@ parse_time(const char *value, time_t *result, bool utc_default) } /* wrong format */ else if (!IsSpace(*value)) + { + pfree(tmp); return false; + } else value++; } @@ -1262,7 +1311,7 @@ parse_time(const char *value, time_t *result, bool utc_default) i = sscanf(tmp, "%04d %02d %02d %02d %02d %02d%1s", &tm.tm_year, &tm.tm_mon, &tm.tm_mday, &tm.tm_hour, &tm.tm_min, &tm.tm_sec, junk); - free(tmp); + pfree(tmp); if (i < 3 || i > 6) return false; @@ -1288,9 +1337,7 @@ parse_time(const char *value, time_t *result, bool utc_default) { /* set timezone to UTC */ pgut_setenv("TZ", "UTC"); -#ifdef WIN32 tzset(); -#endif } /* convert time to utc unix time */ @@ -1302,9 +1349,7 @@ parse_time(const char *value, time_t *result, bool utc_default) else pgut_unsetenv("TZ"); -#ifdef WIN32 tzset(); -#endif /* adjust time zone */ if (tz_set || utc_default) @@ -1415,16 +1460,16 @@ parse_lsn(const char *value, XLogRecPtr *result) len1 = strspn(value, "0123456789abcdefABCDEF"); if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || value[len1] != '/') - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); len2 = strspn(value + len1 + 1, "0123456789abcdefABCDEF"); if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || value[len1 + 1 + len2] != '\0') - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); if (sscanf(value, "%X/%X", &xlogid, &xrecoff) == 2) *result = (XLogRecPtr) ((uint64) xlogid << 32) | xrecoff; else { - elog(ERROR, "invalid LSN \"%s\"", value); + elog(ERROR, "Invalid LSN \"%s\"", value); return false; } @@ -1540,33 +1585,19 @@ time2iso(char *buf, size_t len, time_t time, bool utc) time_t gmt; time_t offset; char *ptr = buf; - char *local_tz = getenv("TZ"); /* set timezone to UTC if requested */ if (utc) { - pgut_setenv("TZ", "UTC"); -#ifdef WIN32 - tzset(); -#endif + ptm = gmtime(&time); + strftime(ptr, len, "%Y-%m-%d %H:%M:%S+00", ptm); + return; } ptm = gmtime(&time); gmt = mktime(ptm); ptm = localtime(&time); - if (utc) - { - /* return old timezone back if any */ - if (local_tz) - pgut_setenv("TZ", local_tz); - else - pgut_unsetenv("TZ"); -#ifdef WIN32 - tzset(); -#endif - } - /* adjust timezone offset */ offset = time - gmt + (ptm->tm_isdst ? 
3600 : 0); diff --git a/src/utils/configuration.h b/src/utils/configuration.h index 2c6ea3eec..59da29bd5 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -100,6 +100,7 @@ struct ConfigOption #define OPTION_UNIT_TIME 0xF0000 /* mask for time-related units */ #define OPTION_UNIT (OPTION_UNIT_MEMORY | OPTION_UNIT_TIME) +#define GET_VAL_IN_BASE_UNITS 0x80000000 /* bitflag to get memory and time values in default units*/ extern ProbackupSubcmd parse_subcmd(char const * const subcmd_str); extern char const *get_subcmd_name(ProbackupSubcmd const subcmd); diff --git a/src/utils/file.c b/src/utils/file.c index 7d1df554b..fa08939f5 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -18,6 +18,10 @@ static __thread int fio_stdin = 0; static __thread int fio_stderr = 0; static char *async_errormsg = NULL; +#define PAGE_ZEROSEARCH_COARSE_GRANULARITY 4096 +#define PAGE_ZEROSEARCH_FINE_GRANULARITY 64 +static const char zerobuf[PAGE_ZEROSEARCH_COARSE_GRANULARITY] = {0}; + fio_location MyLocation; typedef struct @@ -269,8 +273,8 @@ fio_write_all(int fd, void const* buf, size_t size) } /* Get version of remote agent */ -int -fio_get_agent_version(void) +void +fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size) { fio_header hdr; hdr.cop = FIO_AGENT_VERSION; @@ -278,8 +282,13 @@ fio_get_agent_version(void) IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_read_all(fio_stdin, &hdr, sizeof(hdr)), sizeof(hdr)); + if (hdr.size > payload_buf_size) + { + elog(ERROR, "Corrupted remote compatibility protocol: insufficient payload_buf_size=%zu", payload_buf_size); + } - return hdr.arg; + *protocol = hdr.arg; + IO_CHECK(fio_read_all(fio_stdin, payload_buf, hdr.size), hdr.size); } /* Open input stream. 
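fio_get_agent_version() above now receives a variable-size payload along with the protocol number, and refuses to read it when the advertised size exceeds the caller's buffer. A standalone sketch of that guard; the header layout and helper name are illustrative:

/* sketch: validate a length field from the wire before copying the payload */
#include <stdio.h>
#include <string.h>

typedef struct { int cop; int arg; size_t size; } hdr_t;

static int read_payload(const hdr_t *hdr, const char *wire,
                        char *payload_buf, size_t payload_buf_size)
{
	if (hdr->size > payload_buf_size)
	{
		fprintf(stderr, "corrupted protocol: payload %zu > buffer %zu\n",
		        hdr->size, payload_buf_size);
		return -1;
	}
	memcpy(payload_buf, wire, hdr->size);
	return 0;
}

int main(void)
{
	char  buf[16];
	hdr_t ok  = { 0, 1, 6 };      /* arg would carry the protocol number */
	hdr_t bad = { 0, 1, 64 };     /* size larger than our buffer */

	printf("ok:  %d\n", read_payload(&ok, "agent!", buf, sizeof(buf)));
	printf("bad: %d\n", read_payload(&bad, "agent!", buf, sizeof(buf)));
	return 0;
}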
Remote file is fetched to the in-memory buffer and then accessed through Linux fmemopen */ @@ -305,7 +314,7 @@ fio_open_stream(char const* path, fio_location location) IO_CHECK(fio_read_all(fio_stdin, fio_stdin_buffer, hdr.size), hdr.size); #ifdef WIN32 f = tmpfile(); - IO_CHECK(fwrite(f, 1, hdr.size, fio_stdin_buffer), hdr.size); + IO_CHECK(fwrite(fio_stdin_buffer, 1, hdr.size, f), hdr.size); SYS_CHECK(fseek(f, 0, SEEK_SET)); #else f = fmemopen(fio_stdin_buffer, hdr.size, "r"); @@ -489,8 +498,10 @@ fio_disconnect(void) Assert(hdr.cop == FIO_DISCONNECTED); SYS_CHECK(close(fio_stdin)); SYS_CHECK(close(fio_stdout)); + SYS_CHECK(close(fio_stderr)); fio_stdin = 0; fio_stdout = 0; + fio_stderr = 0; wait_ssh(); } } @@ -1148,24 +1159,35 @@ fio_stat(char const* path, struct stat* st, bool follow_symlink, fio_location lo bool fio_is_same_file(char const* filename1, char const* filename2, bool follow_symlink, fio_location location) { + char *abs_name1 = make_absolute_path(filename1); + char *abs_name2 = make_absolute_path(filename2); + bool result = strcmp(abs_name1, abs_name2) == 0; + #ifndef WIN32 - struct stat stat1, stat2; + if (!result) + { + struct stat stat1, stat2; - if (fio_stat(filename1, &stat1, follow_symlink, location) < 0) - elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno)); + if (fio_stat(filename1, &stat1, follow_symlink, location) < 0) + { + if (errno == ENOENT) + return false; + elog(ERROR, "Can't stat file \"%s\": %s", filename1, strerror(errno)); + } - if (fio_stat(filename2, &stat2, follow_symlink, location) < 0) - elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno)); + if (fio_stat(filename2, &stat2, follow_symlink, location) < 0) + { + if (errno == ENOENT) + return false; + elog(ERROR, "Can't stat file \"%s\": %s", filename2, strerror(errno)); + } - return stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev; -#else - char *abs_name1 = make_absolute_path(filename1); - char *abs_name2 = make_absolute_path(filename2); - bool result = strcmp(abs_name1, abs_name2) == 0; + result = (stat1.st_ino == stat2.st_ino && stat1.st_dev == stat2.st_dev); + } +#endif free(abs_name2); free(abs_name1); return result; -#endif } /* @@ -1353,10 +1375,20 @@ fio_sync(char const* path, fio_location location) } } +enum { + GET_CRC32_DECOMPRESS = 1, + GET_CRC32_MISSING_OK = 2, + GET_CRC32_TRUNCATED = 4 +}; + /* Get crc32 of file */ -pg_crc32 -fio_get_crc32(const char *file_path, fio_location location, bool decompress) +static pg_crc32 +fio_get_crc32_ex(const char *file_path, fio_location location, + bool decompress, bool missing_ok, bool truncated) { + if (decompress && truncated) + elog(ERROR, "Could not calculate CRC for compressed truncated file"); + if (fio_is_remote(location)) { fio_header hdr; @@ -1368,7 +1400,11 @@ fio_get_crc32(const char *file_path, fio_location location, bool decompress) hdr.arg = 0; if (decompress) - hdr.arg = 1; + hdr.arg = GET_CRC32_DECOMPRESS; + if (missing_ok) + hdr.arg |= GET_CRC32_MISSING_OK; + if (truncated) + hdr.arg |= GET_CRC32_TRUNCATED; IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); IO_CHECK(fio_write_all(fio_stdout, file_path, path_len), path_len); @@ -1379,12 +1415,28 @@ fio_get_crc32(const char *file_path, fio_location location, bool decompress) else { if (decompress) - return pgFileGetCRCgz(file_path, true, true); + return pgFileGetCRCgz(file_path, true, missing_ok); + else if (truncated) + return pgFileGetCRCTruncated(file_path, true, missing_ok); else - return pgFileGetCRC(file_path, true, 
true); + return pgFileGetCRC(file_path, true, missing_ok); } } +pg_crc32 +fio_get_crc32(const char *file_path, fio_location location, + bool decompress, bool missing_ok) +{ + return fio_get_crc32_ex(file_path, location, decompress, missing_ok, false); +} + +pg_crc32 +fio_get_crc32_truncated(const char *file_path, fio_location location, + bool missing_ok) +{ + return fio_get_crc32_ex(file_path, location, false, missing_ok, true); +} + /* Remove file */ int fio_unlink(char const* path, fio_location location) @@ -2445,7 +2497,7 @@ fio_send_pages_impl(int out, char* buf) * REMOTE_ERROR (-6) */ int -fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, char **errormsg) +fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) { fio_header hdr; int exit_code = SEND_OK; @@ -2485,11 +2537,22 @@ fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, exit_code = hdr.arg; goto cleanup; } - else if (hdr.cop == FIO_PAGE) + else if (hdr.cop == FIO_PAGE || hdr.cop == FIO_PAGE_ZERO) { int rc; - Assert(hdr.size <= CHUNK_SIZE); - IO_CHECK(fio_read_all(fio_stdin, in_buf, hdr.size), hdr.size); + unsigned size; + if (hdr.cop == FIO_PAGE) + { + Assert(hdr.size <= CHUNK_SIZE); + size = hdr.size; + IO_CHECK(fio_read_all(fio_stdin, in_buf, hdr.size), hdr.size); + } + else + { + Assert(hdr.arg <= CHUNK_SIZE); + size = hdr.arg; + memset(in_buf, 0, hdr.arg); + } /* We have received a chunk of compressed data, lets decompress it */ if (strm == NULL) @@ -2500,7 +2563,7 @@ fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, /* The fields next_in, avail_in initialized before init */ strm->next_in = (Bytef *)in_buf; - strm->avail_in = hdr.size; + strm->avail_in = size; rc = inflateInit2(strm, 15 + 16); @@ -2517,7 +2580,7 @@ fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, else { strm->next_in = (Bytef *)in_buf; - strm->avail_in = hdr.size; + strm->avail_in = size; } strm->next_out = (Bytef *)out_buf; /* output buffer */ @@ -2594,6 +2657,113 @@ fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, return exit_code; } +typedef struct send_file_state { + bool calc_crc; + uint32_t crc; + int64_t read_size; + int64_t write_size; +} send_file_state; + +/* find page border of all-zero tail */ +static size_t +find_zero_tail(char *buf, size_t len) +{ + size_t i, l; + size_t granul = sizeof(zerobuf); + + if (len == 0) + return 0; + + /* fast check for last bytes */ + l = Min(len, PAGE_ZEROSEARCH_FINE_GRANULARITY); + i = len - l; + if (memcmp(buf + i, zerobuf, l) != 0) + return len; + + /* coarse search for zero tail */ + i = (len-1) & ~(granul-1); + l = len - i; + for (;;) + { + if (memcmp(buf+i, zerobuf, l) != 0) + { + i += l; + break; + } + if (i == 0) + break; + i -= granul; + l = granul; + } + + len = i; + /* search zero tail with finer granularity */ + for (granul = sizeof(zerobuf)/2; + len > 0 && granul >= PAGE_ZEROSEARCH_FINE_GRANULARITY; + granul /= 2) + { + if (granul > l) + continue; + i = (len-1) & ~(granul-1); + l = len - i; + if (memcmp(buf+i, zerobuf, l) == 0) + len = i; + } + + return len; +} + +static void +fio_send_file_crc(send_file_state* st, char *buf, size_t len) +{ + int64_t write_size; + + if (!st->calc_crc) + return; + + write_size = st->write_size; + while (st->read_size > write_size) + { + size_t crc_len = Min(st->read_size - write_size, sizeof(zerobuf)); + COMP_FILE_CRC32(true, st->crc, zerobuf, crc_len); + write_size += crc_len; + } + + if (len > 0) + 
COMP_FILE_CRC32(true, st->crc, buf, len); +} + +static bool +fio_send_file_write(FILE* out, send_file_state* st, char *buf, size_t len) +{ + if (len == 0) + return true; + +#ifdef WIN32 + if (st->read_size > st->write_size && + _chsize_s(fileno(out), st->read_size) != 0) + { + elog(WARNING, "Could not change file size to %lld: %m", st->read_size); + return false; + } +#endif + if (st->read_size > st->write_size && + fseeko(out, st->read_size, SEEK_SET) != 0) + { + return false; + } + + if (fwrite(buf, 1, len, out) != len) + { + return false; + } + + st->read_size += len; + st->write_size = st->read_size; + + return true; +} + /* Receive chunks of data and write them to destination file. * Return codes: * SEND_OK (0) @@ -2606,13 +2776,22 @@ fio_send_file_gz(const char *from_fullpath, const char *to_fullpath, FILE* out, * If pgFile is not NULL then we must calculate crc and read_size for it. */ int -fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, +fio_send_file(const char *from_fullpath, FILE* out, bool cut_zero_tail, pgFile *file, char **errormsg) { fio_header hdr; int exit_code = SEND_OK; size_t path_len = strlen(from_fullpath) + 1; char *buf = pgut_malloc(CHUNK_SIZE); /* buffer */ + send_file_state st = {false, 0, 0, 0}; + + memset(&hdr, 0, sizeof(hdr)); + + if (file) + { + st.calc_crc = true; + st.crc = file->crc; + } hdr.cop = FIO_SEND_FILE; hdr.size = path_len; @@ -2630,6 +2809,37 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, if (hdr.cop == FIO_SEND_FILE_EOF) { + if (st.write_size < st.read_size) + { + if (!cut_zero_tail) + { + /* + * We still need to calc crc for zero tail. + */ + fio_send_file_crc(&st, NULL, 0); + + /* + * Let's write single zero byte to the end of file to restore + * logical size. + * Well, it would be better to use ftruncate here actually, + * but then we need to change interface. + */ + st.read_size -= 1; + buf[0] = 0; + if (!fio_send_file_write(out, &st, buf, 1)) + { + exit_code = WRITE_FAILED; + break; + } + } + } + + if (file) + { + file->crc = st.crc; + file->read_size = st.read_size; + file->write_size = st.write_size; + } break; } else if (hdr.cop == FIO_ERROR) @@ -2650,17 +2860,23 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, IO_CHECK(fio_read_all(fio_stdin, buf, hdr.size), hdr.size); /* We have received a chunk of data data, lets write it out */ - if (fwrite(buf, 1, hdr.size, out) != hdr.size) + fio_send_file_crc(&st, buf, hdr.size); + if (!fio_send_file_write(out, &st, buf, hdr.size)) { exit_code = WRITE_FAILED; break; } + } + else if (hdr.cop == FIO_PAGE_ZERO) + { + Assert(hdr.size == 0); + Assert(hdr.arg <= CHUNK_SIZE); - if (file) - { - file->read_size += hdr.size; - COMP_FILE_CRC32(true, file->crc, buf, hdr.size); - } + /* + * We have received a chunk of zero data, lets just think we + * wrote it. 
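find_zero_tail() and the FIO_PAGE_ZERO branch above implement the sparse-send optimisation: trailing zero bytes are never shipped, only their length, and the receiver just advances its logical position. A simplified standalone scan of the same idea, checking one coarse block at a time; the real find_zero_tail() additionally refines the boundary at finer granularity:

/* sketch: how many leading bytes must be sent before the all-zero tail */
#include <stdio.h>
#include <string.h>

#define GRANULARITY 64

static size_t zero_tail_start(const char *buf, size_t len)
{
	static const char zero[GRANULARITY];
	size_t end = len;

	while (end > 0)
	{
		size_t chunk = end < GRANULARITY ? end : GRANULARITY;

		if (memcmp(buf + end - chunk, zero, chunk) != 0)
			break;
		end -= chunk;
	}
	return end;          /* block-granular start of the zero tail */
}

int main(void)
{
	char page[8192] = {0};

	memcpy(page, "real data", 9);
	printf("send %zu bytes, report %zu zero bytes\n",
	       zero_tail_start(page, sizeof(page)),
	       sizeof(page) - zero_tail_start(page, sizeof(page)));
	return 0;
}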
+ */ + st.read_size += hdr.arg; } else { @@ -2676,6 +2892,128 @@ fio_send_file(const char *from_fullpath, const char *to_fullpath, FILE* out, return exit_code; } +int +fio_send_file_local(const char *from_fullpath, FILE* out, bool cut_zero_tail, + pgFile *file, char **errormsg) +{ + FILE* in; + char* buf; + size_t read_len, non_zero_len; + int exit_code = SEND_OK; + send_file_state st = {false, 0, 0, 0}; + + if (file) + { + st.calc_crc = true; + st.crc = file->crc; + } + + /* open source file for read */ + in = fopen(from_fullpath, PG_BINARY_R); + if (in == NULL) + { + /* maybe deleted, it's not error in case of backup */ + if (errno == ENOENT) + return FILE_MISSING; + + + *errormsg = psprintf("Cannot open file \"%s\": %s", from_fullpath, + strerror(errno)); + return OPEN_FAILED; + } + + /* disable stdio buffering for local input/output files to avoid triple buffering */ + setvbuf(in, NULL, _IONBF, BUFSIZ); + setvbuf(out, NULL, _IONBF, BUFSIZ); + + /* allocate 64kB buffer */ + buf = pgut_malloc(CHUNK_SIZE); + + /* copy content and calc CRC */ + for (;;) + { + read_len = fread(buf, 1, CHUNK_SIZE, in); + + if (ferror(in)) + { + *errormsg = psprintf("Cannot read from file \"%s\": %s", + from_fullpath, strerror(errno)); + exit_code = READ_FAILED; + goto cleanup; + } + + if (read_len > 0) + { + non_zero_len = find_zero_tail(buf, read_len); + /* + * It is dirty trick to silence warnings in CFS GC process: + * backup at least cfs header size bytes. + */ + if (st.read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && + st.read_size + read_len > 0) + { + non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, + st.read_size + read_len); + non_zero_len -= st.read_size; + } + if (non_zero_len > 0) + { + fio_send_file_crc(&st, buf, non_zero_len); + if (!fio_send_file_write(out, &st, buf, non_zero_len)) + { + exit_code = WRITE_FAILED; + goto cleanup; + } + } + if (non_zero_len < read_len) + { + /* Just pretend we wrote it. */ + st.read_size += read_len - non_zero_len; + } + } + + if (feof(in)) + break; + } + + if (st.write_size < st.read_size) + { + if (!cut_zero_tail) + { + /* + * We still need to calc crc for zero tail. + */ + fio_send_file_crc(&st, NULL, 0); + + /* + * Let's write single zero byte to the end of file to restore + * logical size. + * Well, it would be better to use ftruncate here actually, + * but then we need to change interface. 
+ */ + st.read_size -= 1; + buf[0] = 0; + if (!fio_send_file_write(out, &st, buf, 1)) + { + exit_code = WRITE_FAILED; + goto cleanup; + } + } + } + + if (file) + { + file->crc = st.crc; + file->read_size = st.read_size; + file->write_size = st.write_size; + } + +cleanup: + free(buf); + fclose(in); + return exit_code; +} + /* Send file content * On error we return FIO_ERROR message with following codes * FIO_ERROR: @@ -2694,6 +3032,7 @@ fio_send_file_impl(int out, char const* path) fio_header hdr; char *buf = pgut_malloc(CHUNK_SIZE); size_t read_len = 0; + int64_t read_size = 0; char *errormsg = NULL; /* open source file for read */ @@ -2736,6 +3075,7 @@ fio_send_file_impl(int out, char const* path) for (;;) { read_len = fread(buf, 1, CHUNK_SIZE, fp); + memset(&hdr, 0, sizeof(hdr)); /* report error */ if (ferror(fp)) @@ -2756,10 +3096,36 @@ fio_send_file_impl(int out, char const* path) if (read_len > 0) { /* send chunk */ - hdr.cop = FIO_PAGE; - hdr.size = read_len; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - IO_CHECK(fio_write_all(out, buf, read_len), read_len); + int64_t non_zero_len = find_zero_tail(buf, read_len); + /* + * It is dirty trick to silence warnings in CFS GC process: + * backup at least cfs header size bytes. + */ + if (read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && + read_size + read_len > 0) + { + non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, + read_size + read_len); + non_zero_len -= read_size; + } + + if (non_zero_len > 0) + { + hdr.cop = FIO_PAGE; + hdr.size = non_zero_len; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(out, buf, non_zero_len), non_zero_len); + } + + if (non_zero_len < read_len) + { + hdr.cop = FIO_PAGE_ZERO; + hdr.size = 0; + hdr.arg = read_len - non_zero_len; + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + } + + read_size += read_len; } if (feof(fp)) @@ -2778,6 +3144,210 @@ fio_send_file_impl(int out, char const* path) return; } +/* + * Read the local file to compute its CRC. + * We cannot make decision about file decompression because + * user may ask to backup already compressed files and we should be + * obvious about it. + */ +pg_crc32 +pgFileGetCRC(const char *file_path, bool use_crc32c, bool missing_ok) +{ + FILE *fp; + pg_crc32 crc = 0; + char *buf; + size_t len = 0; + + INIT_FILE_CRC32(use_crc32c, crc); + + /* open file in binary read mode */ + fp = fopen(file_path, PG_BINARY_R); + if (fp == NULL) + { + if (errno == ENOENT) + { + if (missing_ok) + { + FIN_FILE_CRC32(use_crc32c, crc); + return crc; + } + } + + elog(ERROR, "Cannot open file \"%s\": %s", + file_path, strerror(errno)); + } + + /* disable stdio buffering */ + setvbuf(fp, NULL, _IONBF, BUFSIZ); + buf = pgut_malloc(STDIO_BUFSIZE); + + /* calc CRC of file */ + for (;;) + { + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + len = fread(buf, 1, STDIO_BUFSIZE, fp); + + if (ferror(fp)) + elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); + + /* update CRC */ + COMP_FILE_CRC32(use_crc32c, crc, buf, len); + + if (feof(fp)) + break; + } + + FIN_FILE_CRC32(use_crc32c, crc); + fclose(fp); + pg_free(buf); + + return crc; +} + +/* + * Read the local file to compute CRC for it extened to real_size. 
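Both receive paths above restore the file's logical size after a cut zero tail by seeking to size - 1 and writing one zero byte (the comments note that ftruncate would be cleaner but requires an interface change). A standalone sketch of why that single byte is enough:

```c
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
    FILE *f = fopen("sparse.demo", "wb");
    if (f == NULL)
        return 1;

    /* Pretend a 1 MiB all-zero tail was skipped: seek to the last byte of
     * the logical size and write a single zero, the same trick as
     * "st.read_size -= 1; buf[0] = 0;" above. */
    fseek(f, 1024L * 1024 - 1, SEEK_SET);
    fputc(0, f);
    fclose(f);

    struct stat st;
    if (stat("sparse.demo", &st) == 0)
        printf("logical size: %lld\n", (long long) st.st_size);  /* 1048576 */
    return 0;
}
```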
+ */ +pg_crc32 +pgFileGetCRCTruncated(const char *file_path, bool use_crc32c, bool missing_ok) +{ + FILE *fp; + char *buf; + size_t len = 0; + size_t non_zero_len; + send_file_state st = {true, 0, 0, 0}; + + INIT_FILE_CRC32(use_crc32c, st.crc); + + /* open file in binary read mode */ + fp = fopen(file_path, PG_BINARY_R); + if (fp == NULL) + { + if (errno == ENOENT) + { + if (missing_ok) + { + FIN_FILE_CRC32(use_crc32c, st.crc); + return st.crc; + } + } + + elog(ERROR, "Cannot open file \"%s\": %s", + file_path, strerror(errno)); + } + + /* disable stdio buffering */ + setvbuf(fp, NULL, _IONBF, BUFSIZ); + buf = pgut_malloc(CHUNK_SIZE); + + /* calc CRC of file */ + for (;;) + { + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + len = fread(buf, 1, STDIO_BUFSIZE, fp); + + if (ferror(fp)) + elog(ERROR, "Cannot read \"%s\": %s", file_path, strerror(errno)); + + non_zero_len = find_zero_tail(buf, len); + /* same trick as in fio_send_file */ + if (st.read_size + non_zero_len < PAGE_ZEROSEARCH_FINE_GRANULARITY && + st.read_size + len > 0) + { + non_zero_len = Min(PAGE_ZEROSEARCH_FINE_GRANULARITY, + st.read_size + len); + non_zero_len -= st.read_size; + } + if (non_zero_len) + { + fio_send_file_crc(&st, buf, non_zero_len); + st.write_size += st.read_size + non_zero_len; + } + st.read_size += len; + + if (feof(fp)) + break; + } + + FIN_FILE_CRC32(use_crc32c, st.crc); + fclose(fp); + pg_free(buf); + + return st.crc; +} + +/* + * Read the local file to compute its CRC. + * We cannot make decision about file decompression because + * user may ask to backup already compressed files and we should be + * obvious about it. + */ +pg_crc32 +pgFileGetCRCgz(const char *file_path, bool use_crc32c, bool missing_ok) +{ + gzFile fp; + pg_crc32 crc = 0; + int len = 0; + int err; + char *buf; + + INIT_FILE_CRC32(use_crc32c, crc); + + /* open file in binary read mode */ + fp = gzopen(file_path, PG_BINARY_R); + if (fp == NULL) + { + if (errno == ENOENT) + { + if (missing_ok) + { + FIN_FILE_CRC32(use_crc32c, crc); + return crc; + } + } + + elog(ERROR, "Cannot open file \"%s\": %s", + file_path, strerror(errno)); + } + + buf = pgut_malloc(STDIO_BUFSIZE); + + /* calc CRC of file */ + for (;;) + { + if (interrupted) + elog(ERROR, "interrupted during CRC calculation"); + + len = gzread(fp, buf, STDIO_BUFSIZE); + + if (len <= 0) + { + /* we either run into eof or error */ + if (gzeof(fp)) + break; + else + { + const char *err_str = NULL; + + err_str = gzerror(fp, &err); + elog(ERROR, "Cannot read from compressed file %s", err_str); + } + } + + /* update CRC */ + COMP_FILE_CRC32(use_crc32c, crc, buf, len); + } + + FIN_FILE_CRC32(use_crc32c, crc); + gzclose(fp); + pg_free(buf); + + return crc; +} + /* Compile the array of files located on remote machine in directory root */ static void fio_list_dir_internal(parray *files, const char *root, bool exclude, @@ -3312,9 +3882,16 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, buf, hdr.size), hdr.size); break; case FIO_AGENT_VERSION: - hdr.arg = AGENT_PROTOCOL_VERSION; - IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; + { + size_t payload_size = prepare_compatibility_str(buf, buf_size); + + hdr.arg = AGENT_PROTOCOL_VERSION; + hdr.size = payload_size; + + IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(out, buf, payload_size), payload_size); + break; + } case FIO_STAT: /* Get information about file with specified path */ hdr.size = sizeof(st); rc = hdr.arg ? 
stat(buf, &st) : lstat(buf, &st); @@ -3377,11 +3954,15 @@ fio_communicate(int in, int out) IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); break; case FIO_GET_CRC32: + Assert((hdr.arg & GET_CRC32_TRUNCATED) == 0 || + (hdr.arg & (GET_CRC32_TRUNCATED|GET_CRC32_DECOMPRESS)) == GET_CRC32_TRUNCATED); /* calculate crc32 for a file */ - if (hdr.arg == 1) - crc = pgFileGetCRCgz(buf, true, true); + if ((hdr.arg & GET_CRC32_DECOMPRESS)) + crc = pgFileGetCRCgz(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); + else if ((hdr.arg & GET_CRC32_TRUNCATED)) + crc = pgFileGetCRCTruncated(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); else - crc = pgFileGetCRC(buf, true, true); + crc = pgFileGetCRC(buf, true, (hdr.arg & GET_CRC32_MISSING_OK) != 0); IO_CHECK(fio_write_all(out, &crc, sizeof(crc)), sizeof(crc)); break; case FIO_GET_CHECKSUM_MAP: @@ -3403,7 +3984,8 @@ fio_communicate(int in, int out) case FIO_DISCONNECT: hdr.cop = FIO_DISCONNECTED; IO_CHECK(fio_write_all(out, &hdr, sizeof(hdr)), sizeof(hdr)); - break; + free(buf); + return; case FIO_GET_ASYNC_ERROR: fio_get_async_error_impl(out); break; diff --git a/src/utils/file.h b/src/utils/file.h index a554b4ab0..01e5a24f4 100644 --- a/src/utils/file.h +++ b/src/utils/file.h @@ -56,7 +56,8 @@ typedef enum FIO_CHECK_POSTMASTER, FIO_GET_ASYNC_ERROR, FIO_WRITE_ASYNC, - FIO_READLINK + FIO_READLINK, + FIO_PAGE_ZERO } fio_operations; typedef enum @@ -91,7 +92,7 @@ extern fio_location MyLocation; extern void fio_redirect(int in, int out, int err); extern void fio_communicate(int in, int out); -extern int fio_get_agent_version(void); +extern void fio_get_agent_version(int* protocol, char* payload_buf, size_t payload_buf_size); extern FILE* fio_fopen(char const* name, char const* mode, fio_location location); extern size_t fio_fwrite(FILE* f, void const* buf, size_t size); extern ssize_t fio_fwrite_async_compressed(FILE* f, void const* buf, size_t size, int compress_alg); @@ -120,7 +121,10 @@ extern int fio_truncate(int fd, off_t size); extern int fio_close(int fd); extern void fio_disconnect(void); extern int fio_sync(char const* path, fio_location location); -extern pg_crc32 fio_get_crc32(const char *file_path, fio_location location, bool decompress); +extern pg_crc32 fio_get_crc32(const char *file_path, fio_location location, + bool decompress, bool missing_ok); +extern pg_crc32 fio_get_crc32_truncated(const char *file_path, fio_location location, + bool missing_ok); extern int fio_rename(char const* old_path, char const* new_path, fio_location location); extern int fio_symlink(char const* target, char const* link_path, bool overwrite, fio_location location); diff --git a/src/utils/json.c b/src/utils/json.c index 9f13a958f..2c8e0fe9b 100644 --- a/src/utils/json.c +++ b/src/utils/json.c @@ -144,3 +144,21 @@ json_add_escaped(PQExpBuffer buf, const char *str) } appendPQExpBufferChar(buf, '"'); } + +void +json_add_min(PQExpBuffer buf, JsonToken type) +{ + switch (type) + { + case JT_BEGIN_OBJECT: + appendPQExpBufferChar(buf, '{'); + add_comma = false; + break; + case JT_END_OBJECT: + appendPQExpBufferStr(buf, "}\n"); + add_comma = true; + break; + default: + break; + } +} diff --git a/src/utils/json.h b/src/utils/json.h index cc9f1168d..f80832e69 100644 --- a/src/utils/json.h +++ b/src/utils/json.h @@ -25,6 +25,7 @@ typedef enum } JsonToken; extern void json_add(PQExpBuffer buf, JsonToken type, int32 *level); +extern void json_add_min(PQExpBuffer buf, JsonToken type); extern void json_add_key(PQExpBuffer buf, const char *name, int32 level); extern 
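The FIO_GET_CRC32 request now multiplexes three behaviours through bit flags in hdr.arg. A minimal sketch of the dispatch, with assumed single-bit flag values (the real constants live in pg_probackup's headers, not in this hunk):

```c
#include <assert.h>
#include <stdio.h>

enum {
    GET_CRC32_DECOMPRESS = 1,   /* assumed bit values, for illustration only */
    GET_CRC32_MISSING_OK = 2,
    GET_CRC32_TRUNCATED  = 4
};

int main(void)
{
    unsigned arg = GET_CRC32_TRUNCATED | GET_CRC32_MISSING_OK;

    /* Mirrors the dispatch in fio_communicate(): decompress and truncated
     * must not be combined, missing_ok may accompany either. */
    assert((arg & GET_CRC32_TRUNCATED) == 0 ||
           (arg & (GET_CRC32_TRUNCATED | GET_CRC32_DECOMPRESS)) == GET_CRC32_TRUNCATED);

    if (arg & GET_CRC32_DECOMPRESS)
        puts("pgFileGetCRCgz");
    else if (arg & GET_CRC32_TRUNCATED)
        puts("pgFileGetCRCTruncated");
    else
        puts("pgFileGetCRC");
    return 0;
}
```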
void json_add_value(PQExpBuffer buf, const char *name, const char *value, int32 level, bool escaped); diff --git a/src/utils/logger.c b/src/utils/logger.c index 70bd5dcc4..7ea41f74e 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -19,14 +19,19 @@ #include "utils/configuration.h" +#include "json.h" + /* Logger parameters */ LoggerConfig logger_config = { LOG_LEVEL_CONSOLE_DEFAULT, LOG_LEVEL_FILE_DEFAULT, LOG_FILENAME_DEFAULT, NULL, + NULL, LOG_ROTATION_SIZE_DEFAULT, - LOG_ROTATION_AGE_DEFAULT + LOG_ROTATION_AGE_DEFAULT, + LOG_FORMAT_CONSOLE_DEFAULT, + LOG_FORMAT_FILE_DEFAULT }; /* Implementation for logging.h */ @@ -227,6 +232,35 @@ write_elevel(FILE *stream, int elevel) } } +static void +write_elevel_for_json(PQExpBuffer buf, int elevel) +{ + switch (elevel) + { + case VERBOSE: + appendPQExpBufferStr(buf, "\"VERBOSE\""); + break; + case LOG: + appendPQExpBufferStr(buf, "\"LOG\""); + break; + case INFO: + appendPQExpBufferStr(buf, "\"INFO\""); + break; + case NOTICE: + appendPQExpBufferStr(buf, "\"NOTICE\""); + break; + case WARNING: + appendPQExpBufferStr(buf, "\"WARNING\""); + break; + case ERROR: + appendPQExpBufferStr(buf, "\"ERROR\""); + break; + default: + elog_stderr(ERROR, "invalid logging level: %d", elevel); + break; + } +} + /* * Exit with code if it is an error. * Check for in_cleanup flag to avoid deadlock in case of ERROR in cleanup @@ -276,6 +310,12 @@ elog_internal(int elevel, bool file_only, const char *message) time_t log_time = (time_t) time(NULL); char strfbuf[128]; char str_pid[128]; + char str_pid_json[128]; + char str_thread_json[64]; + PQExpBufferData show_buf; + PQExpBuffer buf_json = &show_buf; + int8 format_console, + format_file; write_to_file = elevel >= logger_config.log_level_file && logger_config.log_directory @@ -283,6 +323,8 @@ elog_internal(int elevel, bool file_only, const char *message) write_to_error_log = elevel >= ERROR && logger_config.error_log_filename && logger_config.log_directory && logger_config.log_directory[0] != '\0'; write_to_stderr = elevel >= logger_config.log_level_console && !file_only; + format_console = logger_config.log_format_console; + format_file = logger_config.log_format_file; if (remote_agent) { @@ -292,10 +334,27 @@ elog_internal(int elevel, bool file_only, const char *message) pthread_lock(&log_file_mutex); loggin_in_progress = true; - if (write_to_file || write_to_error_log || is_archive_cmd) + if (write_to_file || write_to_error_log || is_archive_cmd || + format_console == JSON) strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&log_time)); + if (format_file == JSON || format_console == JSON) + { + snprintf(str_pid_json, sizeof(str_pid_json), "%d", my_pid); + snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num); + + initPQExpBuffer(&show_buf); + json_add_min(buf_json, JT_BEGIN_OBJECT); + json_add_value(buf_json, "ts", strfbuf, 0, true); + json_add_value(buf_json, "pid", str_pid_json, 0, true); + json_add_key(buf_json, "level", 0); + write_elevel_for_json(buf_json, elevel); + json_add_value(buf_json, "msg", message, 0, true); + json_add_value(buf_json, "my_thread_num", str_thread_json, 0, true); + json_add_min(buf_json, JT_END_OBJECT); + } + snprintf(str_pid, sizeof(str_pid), "[%d]:", my_pid); /* @@ -307,12 +366,18 @@ elog_internal(int elevel, bool file_only, const char *message) { if (log_file == NULL) open_logfile(&log_file, logger_config.log_filename ? 
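When log_format_file (or log_format_console) is JSON, each message becomes one object built from the keys added above (ts, pid, level, msg, my_thread_num), opened and closed by json_add_min(). Exact whitespace depends on json_add_value()/json_add_key(), but the resulting line is roughly of this shape (values are illustrative):

```
{"ts": "2024-01-15 12:00:00 MSK", "pid": "12345", "level": "INFO", "msg": "example message", "my_thread_num": "[1-1]"}
```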
logger_config.log_filename : LOG_FILENAME_DEFAULT); + if (format_file == JSON) + { + fputs(buf_json->data, log_file); + } + else + { + fprintf(log_file, "%s ", strfbuf); + fprintf(log_file, "%s ", str_pid); + write_elevel(log_file, elevel); - fprintf(log_file, "%s ", strfbuf); - fprintf(log_file, "%s ", str_pid); - write_elevel(log_file, elevel); - - fprintf(log_file, "%s\n", message); + fprintf(log_file, "%s\n", message); + } fflush(log_file); } @@ -326,11 +391,18 @@ elog_internal(int elevel, bool file_only, const char *message) if (error_log_file == NULL) open_logfile(&error_log_file, logger_config.error_log_filename); - fprintf(error_log_file, "%s ", strfbuf); - fprintf(error_log_file, "%s ", str_pid); - write_elevel(error_log_file, elevel); + if (format_file == JSON) + { + fputs(buf_json->data, error_log_file); + } + else + { + fprintf(error_log_file, "%s ", strfbuf); + fprintf(error_log_file, "%s ", str_pid); + write_elevel(error_log_file, elevel); - fprintf(error_log_file, "%s\n", message); + fprintf(error_log_file, "%s\n", message); + } fflush(error_log_file); } @@ -340,35 +412,47 @@ elog_internal(int elevel, bool file_only, const char *message) */ if (write_to_stderr) { - if (is_archive_cmd) + if (format_console == JSON) { - char str_thread[64]; - /* [Issue #213] fix pgbadger parsing */ - snprintf(str_thread, sizeof(str_thread), "[%d-1]:", my_thread_num); - - fprintf(stderr, "%s ", strfbuf); - fprintf(stderr, "%s ", str_pid); - fprintf(stderr, "%s ", str_thread); + fprintf(stderr, "%s", buf_json->data); } - else if (show_color) + else { - /* color WARNING and ERROR messages */ - if (elevel == WARNING) - fprintf(stderr, "%s", TC_YELLOW_BOLD); - else if (elevel == ERROR) - fprintf(stderr, "%s", TC_RED_BOLD); - } + if (is_archive_cmd) + { + char str_thread[64]; + /* [Issue #213] fix pgbadger parsing */ + snprintf(str_thread, sizeof(str_thread), "[%d-1]:", my_thread_num); - write_elevel(stderr, elevel); + fprintf(stderr, "%s ", strfbuf); + fprintf(stderr, "%s ", str_pid); + fprintf(stderr, "%s ", str_thread); + } + else if (show_color) + { + /* color WARNING and ERROR messages */ + if (elevel == WARNING) + fprintf(stderr, "%s", TC_YELLOW_BOLD); + else if (elevel == ERROR) + fprintf(stderr, "%s", TC_RED_BOLD); + } + + write_elevel(stderr, elevel); + + /* main payload */ + fprintf(stderr, "%s", message); - /* main payload */ - fprintf(stderr, "%s", message); + /* reset color to default */ + if (show_color && (elevel == WARNING || elevel == ERROR)) + fprintf(stderr, "%s", TC_RESET); - /* reset color to default */ - if (show_color && (elevel == WARNING || elevel == ERROR)) - fprintf(stderr, "%s", TC_RESET); + fprintf(stderr, "\n"); + } - fprintf(stderr, "\n"); + if (format_file == JSON || format_console == JSON) + { + termPQExpBuffer(buf_json); + } fflush(stderr); } @@ -386,7 +470,15 @@ elog_internal(int elevel, bool file_only, const char *message) static void elog_stderr(int elevel, const char *fmt, ...) { - va_list args; + va_list args; + PQExpBufferData show_buf; + PQExpBuffer buf_json = &show_buf; + time_t log_time = (time_t) time(NULL); + char strfbuf[128]; + char str_pid[128]; + char str_thread_json[64]; + char *message; + int8 format_console; /* * Do not log message if severity level is less than log_level. @@ -397,11 +489,37 @@ elog_stderr(int elevel, const char *fmt, ...) 
va_start(args, fmt); - write_elevel(stderr, elevel); - vfprintf(stderr, fmt, args); - fputc('\n', stderr); - fflush(stderr); + format_console = logger_config.log_format_console; + if (format_console == JSON) + { + strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", + localtime(&log_time)); + snprintf(str_pid, sizeof(str_pid), "%d", my_pid); + snprintf(str_thread_json, sizeof(str_thread_json), "[%d-1]", my_thread_num); + + initPQExpBuffer(&show_buf); + json_add_min(buf_json, JT_BEGIN_OBJECT); + json_add_value(buf_json, "ts", strfbuf, 0, true); + json_add_value(buf_json, "pid", str_pid, 0, true); + json_add_key(buf_json, "level", 0); + write_elevel_for_json(buf_json, elevel); + message = get_log_message(fmt, args); + json_add_value(buf_json, "msg", message, 0, true); + json_add_value(buf_json, "my_thread_num", str_thread_json, 0, true); + json_add_min(buf_json, JT_END_OBJECT); + fputs(buf_json->data, stderr); + pfree(message); + termPQExpBuffer(buf_json); + } + else + { + write_elevel(stderr, elevel); + vfprintf(stderr, fmt, args); + fputc('\n', stderr); + } + + fflush(stderr); va_end(args); exit_if_necessary(elevel); @@ -570,6 +688,36 @@ parse_log_level(const char *level) return 0; } +int +parse_log_format(const char *format) +{ + const char *v = format; + size_t len; + + if (v == NULL) + { + elog(ERROR, "log-format got invalid value"); + return 0; + } + + /* Skip all spaces detected */ + while (isspace((unsigned char)*v)) + v++; + len = strlen(v); + + if (len == 0) + elog(ERROR, "log-format is empty"); + + if (pg_strncasecmp("plain", v, len) == 0) + return PLAIN; + else if (pg_strncasecmp("json", v, len) == 0) + return JSON; + + /* Log format is invalid */ + elog(ERROR, "invalid log-format \"%s\"", format); + return 0; +} + /* * Converts integer representation of log level to string. */ @@ -599,6 +747,22 @@ deparse_log_level(int level) return NULL; } +const char * +deparse_log_format(int format) +{ + switch (format) + { + case PLAIN: + return "PLAIN"; + case JSON: + return "JSON"; + default: + elog(ERROR, "invalid log-format %d", format); + } + + return NULL; +} + /* * Construct logfile name using timestamp information. 
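parse_log_format() is the configuration-side counterpart of the PLAIN/JSON constants: it skips leading whitespace, matches the two names case-insensitively, and raises ERROR for anything unrecognized. A hedged fragment of how a configuration handler would use it (the assignment targets are the new LoggerConfig fields):

```c
/* "plain" and "json" (any case, leading spaces tolerated) are the only
 * accepted values; anything else errors out inside parse_log_format(). */
logger_config.log_format_console = parse_log_format("plain");
logger_config.log_format_file    = parse_log_format("JSON");   /* -> JSON */
```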
* diff --git a/src/utils/logger.h b/src/utils/logger.h index 6a7407e41..adc5061e0 100644 --- a/src/utils/logger.h +++ b/src/utils/logger.h @@ -21,6 +21,9 @@ #define ERROR 1 #define LOG_OFF 10 +#define PLAIN 0 +#define JSON 1 + typedef struct LoggerConfig { int log_level_console; @@ -32,6 +35,8 @@ typedef struct LoggerConfig uint64 log_rotation_size; /* Maximum lifetime of an individual log file in minutes */ uint64 log_rotation_age; + int8 log_format_console; + int8 log_format_file; } LoggerConfig; /* Logger parameters */ @@ -43,6 +48,9 @@ extern LoggerConfig logger_config; #define LOG_LEVEL_CONSOLE_DEFAULT INFO #define LOG_LEVEL_FILE_DEFAULT LOG_OFF +#define LOG_FORMAT_CONSOLE_DEFAULT PLAIN +#define LOG_FORMAT_FILE_DEFAULT PLAIN + #define LOG_FILENAME_DEFAULT "pg_probackup.log" #define LOG_DIRECTORY_DEFAULT "log" @@ -56,4 +64,6 @@ extern void init_console(void); extern int parse_log_level(const char *level); extern const char *deparse_log_level(int level); +extern int parse_log_format(const char *format); +extern const char *deparse_log_format(int format); #endif /* LOGGER_H */ diff --git a/src/utils/parray.c b/src/utils/parray.c index 792e26907..65377c001 100644 --- a/src/utils/parray.c +++ b/src/utils/parray.c @@ -217,3 +217,30 @@ bool parray_contains(parray *array, void *elem) } return false; } + +/* effectively remove elements that satisfy certain criterion */ +void +parray_remove_if(parray *array, criterion_fn criterion, void *args, cleanup_fn clean) { + int i = 0; + int j = 0; + + /* removing certain elements */ + while(j < parray_num(array)) { + void *value = array->data[j]; + // if the value satisfies the criterion, clean it up + if(criterion(value, args)) { + clean(value); + j++; + continue; + } + + if(i != j) + array->data[i] = array->data[j]; + + i++; + j++; + } + + /* adjust the number of used elements */ + array->used -= j - i; +} diff --git a/src/utils/parray.h b/src/utils/parray.h index e92ad728c..08846f252 100644 --- a/src/utils/parray.h +++ b/src/utils/parray.h @@ -16,6 +16,9 @@ */ typedef struct parray parray; +typedef bool (*criterion_fn)(void *value, void *args); +typedef void (*cleanup_fn)(void *ref); + extern parray *parray_new(void); extern void parray_expand(parray *array, size_t newnum); extern void parray_free(parray *array); @@ -32,6 +35,7 @@ extern void *parray_bsearch(parray *array, const void *key, int(*compare)(const extern int parray_bsearch_index(parray *array, const void *key, int(*compare)(const void *, const void *)); extern void parray_walk(parray *array, void (*action)(void *)); extern bool parray_contains(parray *array, void *elem); +extern void parray_remove_if(parray *array, criterion_fn criterion, void *args, cleanup_fn clean); #endif /* PARRAY_H */ diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 2cf0ccbe7..9559fa644 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -993,6 +993,12 @@ pgut_str_strip_trailing_filename(const char *filepath, const char *filename) return pgut_strndup(filepath, fp_len); } +void +pgut_free(void *p) +{ + free(p); +} + FILE * pgut_fopen(const char *path, const char *mode, bool missing_ok) { @@ -1209,13 +1215,16 @@ pgut_pgfnames(const char *path, bool strict) } } + filenames[numnames] = NULL; + if (errno) { elog(strict ? 
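parray_remove_if() compacts the array in place: elements matching the criterion are handed to the cleanup callback and dropped, survivors are shifted left, and the used count is adjusted once at the end. A usage sketch, assuming the usual parray_append() helper from parray.h; is_empty_str() and free_elem() are illustrative helpers, not part of the patch:

```c
static bool
is_empty_str(void *value, void *args)
{
    (void) args;                        /* extra criterion arguments unused */
    return ((char *) value)[0] == '\0';
}

static void
free_elem(void *ref)
{
    pgut_free(ref);
}

/* ... */
parray *names = parray_new();
parray_append(names, pgut_strdup("base"));
parray_append(names, pgut_strdup(""));      /* matches the criterion */
parray_append(names, pgut_strdup("pg_wal"));

parray_remove_if(names, is_empty_str, NULL, free_elem);
/* names now holds "base" and "pg_wal"; the empty string was freed */
```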
ERROR : WARNING, "could not read directory \"%s\": %m", path); + pgut_pgfnames_cleanup(filenames); + closedir(dir); return NULL; } - filenames[numnames] = NULL; if (closedir(dir)) { diff --git a/src/utils/pgut.h b/src/utils/pgut.h index fa0efe816..1b7b7864c 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -64,6 +64,7 @@ extern void *pgut_realloc(void *p, size_t size); extern char *pgut_strdup(const char *str); extern char *pgut_strndup(const char *str, size_t n); extern char *pgut_str_strip_trailing_filename(const char *filepath, const char *filename); +extern void pgut_free(void *p); #define pgut_new(type) ((type *) pgut_malloc(sizeof(type))) #define pgut_new0(type) ((type *) pgut_malloc0(sizeof(type))) @@ -108,4 +109,20 @@ extern int sleep(unsigned int seconds); extern int usleep(unsigned int usec); #endif +#ifdef _MSC_VER +#define ARG_SIZE_HINT +#else +#define ARG_SIZE_HINT static +#endif + +static inline uint32 hash_mix32_2(uint32 a, uint32 b) +{ + b ^= (a<<7)|(a>>25); + a *= 0xdeadbeef; + b *= 0xcafeabed; + a ^= a >> 16; + b ^= b >> 15; + return a^b; +} + #endif /* PGUT_H */ diff --git a/src/utils/remote.c b/src/utils/remote.c index 2bfd24d1e..7ef8d3239 100644 --- a/src/utils/remote.c +++ b/src/utils/remote.c @@ -147,6 +147,9 @@ bool launch_agent(void) ssh_argv[ssh_argc++] = "-o"; ssh_argv[ssh_argc++] = "Compression=no"; + ssh_argv[ssh_argc++] = "-o"; + ssh_argv[ssh_argc++] = "ControlMaster=no"; + ssh_argv[ssh_argc++] = "-o"; ssh_argv[ssh_argc++] = "LogLevel=error"; @@ -226,7 +229,7 @@ bool launch_agent(void) return false; } else { #endif - elog(LOG, "Start SSH client process, pid %d", child_pid); + elog(LOG, "Start SSH client process, pid %d, cmd \"%s\"", child_pid, cmd); SYS_CHECK(close(infd[1])); /* These are being used by the child */ SYS_CHECK(close(outfd[0])); SYS_CHECK(close(errfd[1])); @@ -235,10 +238,114 @@ bool launch_agent(void) fio_redirect(infd[0], outfd[1], errfd[0]); /* write to stdout */ } - /* Make sure that remote agent has the same version - * TODO: we must also check PG version and fork edition - */ - agent_version = fio_get_agent_version(); + + /* Make sure that remote agent has the same version, fork and other features to be binary compatible */ + { + char payload_buf[1024]; + fio_get_agent_version(&agent_version, payload_buf, sizeof payload_buf); + check_remote_agent_compatibility(agent_version, payload_buf, sizeof payload_buf); + } + + return true; +} + +#ifdef PGPRO_EDITION +/* PGPRO 10-13 checks to be "(certified)", with exceptional case PGPRO_11 conforming to "(standard certified)" */ +static bool check_certified() +{ + return strstr(PGPRO_VERSION_STR, "(certified)") || + strstr(PGPRO_VERSION_STR, "(standard certified)"); +} +#endif + +static char* extract_pg_edition_str() +{ + static char *vanilla = "vanilla"; +#ifdef PGPRO_EDITION + static char *_1C = "1C"; + static char *std = "standard"; + static char *ent = "enterprise"; + static char *std_cert = "standard-certified"; + static char *ent_cert = "enterprise-certified"; + + if (strcmp(PGPRO_EDITION, _1C) == 0) + return vanilla; + + if (PG_VERSION_NUM < 100000) + return PGPRO_EDITION; + + /* these "certified" checks are applicable to PGPRO from 10 up to 12 versions. 
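hash_mix32_2() is a small static-inline mixer added to pgut.h that folds two 32-bit values into one scrambled 32-bit hash. A standalone copy for experimentation (uint32 spelled as uint32_t so the file compiles outside the tree; the sample inputs are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

static inline uint32_t hash_mix32_2(uint32_t a, uint32_t b)
{
    b ^= (a << 7) | (a >> 25);
    a *= 0xdeadbeef;
    b *= 0xcafeabed;
    a ^= a >> 16;
    b ^= b >> 15;
    return a ^ b;
}

int main(void)
{
    /* e.g. fold two OIDs into a single hash key */
    printf("%08x\n", hash_mix32_2(1663u, 16384u));
    return 0;
}
```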
+ * 13+ certified versions are compatible to non-certified ones */ + if (PG_VERSION_NUM < 130000 && check_certified()) + { + if (strcmp(PGPRO_EDITION, std) == 0) + return std_cert; + else if (strcmp(PGPRO_EDITION, ent) == 0) + return ent_cert; + else + Assert("Bad #define PGPRO_EDITION value" == 0); + } + + return PGPRO_EDITION; +#else + return vanilla; +#endif +} + +#define COMPATIBILITY_VAL_STR(macro) { #macro, macro, 0 } +#define COMPATIBILITY_VAL_INT(macro) { #macro, NULL, macro } + +#define COMPATIBILITY_VAL_SEPARATOR "=" +#define COMPATIBILITY_LINE_SEPARATOR "\n" + +/* + * Compose compatibility string to be sent by pg_probackup agent + * through ssh and to be verified by pg_probackup peer. + * Compatibility string contains postgres essential vars as strings + * in format "var_name" + COMPATIBILITY_VAL_SEPARATOR + "var_value" + COMPATIBILITY_LINE_SEPARATOR + */ +size_t prepare_compatibility_str(char* compatibility_buf, size_t compatibility_buf_size) +{ + typedef struct compatibility_param_tag { + const char* name; + const char* strval; + int intval; + } compatibility_param; + + compatibility_param compatibility_params[] = { + COMPATIBILITY_VAL_STR(PG_MAJORVERSION), + { "edition", extract_pg_edition_str(), 0 }, + COMPATIBILITY_VAL_INT(SIZEOF_VOID_P), + }; + + size_t result_size = 0; + int i; + *compatibility_buf = '\0'; + + for (i = 0; i < (sizeof compatibility_params / sizeof(compatibility_param)); i++) + { + if (compatibility_params[i].strval != NULL) + result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, + "%s" COMPATIBILITY_VAL_SEPARATOR "%s" COMPATIBILITY_LINE_SEPARATOR, + compatibility_params[i].name, + compatibility_params[i].strval); + else + result_size += snprintf(compatibility_buf + result_size, compatibility_buf_size - result_size, + "%s" COMPATIBILITY_VAL_SEPARATOR "%d" COMPATIBILITY_LINE_SEPARATOR, + compatibility_params[i].name, + compatibility_params[i].intval); + Assert(result_size < compatibility_buf_size); + } + return result_size + 1; +} + +/* + * Check incoming remote agent's compatibility params for equality to local ones. + */ +void check_remote_agent_compatibility(int agent_version, char *compatibility_str, size_t compatibility_str_max_size) +{ + elog(LOG, "Agent version=%d\n", agent_version); + if (agent_version != AGENT_PROTOCOL_VERSION) { char agent_version_str[1024]; @@ -252,5 +359,21 @@ bool launch_agent(void) agent_version_str, AGENT_PROTOCOL_VERSION_STR); } - return true; + /* checking compatibility params */ + if (strnlen(compatibility_str, compatibility_str_max_size) == compatibility_str_max_size) + { + elog(ERROR, "Corrupted remote compatibility protocol: compatibility string has no terminating \\0"); + } + + elog(LOG, "Agent compatibility params:\n%s", compatibility_str); + + { + char buf[1024]; + + prepare_compatibility_str(buf, sizeof buf); + if(strcmp(compatibility_str, buf)) + { + elog(ERROR, "Incompatible remote agent params, expected:\n%s, actual:\n:%s", buf, compatibility_str); + } + } } diff --git a/src/validate.c b/src/validate.c index 4044ac158..0887b2e7a 100644 --- a/src/validate.c +++ b/src/validate.c @@ -63,18 +63,18 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) elog(ERROR, "pg_probackup binary version is %s, but backup %s version is %s. " "pg_probackup do not guarantee to be forward compatible. 
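Concretely, on a 64-bit vanilla PostgreSQL 14 build prepare_compatibility_str() would emit the following three name=value lines (values are illustrative and baked in at compile time); check_remote_agent_compatibility() then simply string-compares the remote payload against the locally generated one:

```
PG_MAJORVERSION=14
edition=vanilla
SIZEOF_VOID_P=8
```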
" "Please upgrade pg_probackup binary.", - PROGRAM_VERSION, base36enc(backup->start_time), backup->program_version); + PROGRAM_VERSION, backup_id_of(backup), backup->program_version); /* Check backup server version */ if (strcmp(backup->server_version, PG_MAJORVERSION) != 0) elog(ERROR, "Backup %s has server version %s, but current pg_probackup binary " "compiled with server version %s", - base36enc(backup->start_time), backup->server_version, PG_MAJORVERSION); + backup_id_of(backup), backup->server_version, PG_MAJORVERSION); if (backup->status == BACKUP_STATUS_RUNNING) { elog(WARNING, "Backup %s has status %s, change it to ERROR and skip validation", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); write_backup_status(backup, BACKUP_STATUS_ERROR, true); corrupted_backup_found = true; return; @@ -88,7 +88,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) backup->status != BACKUP_STATUS_CORRUPT) { elog(WARNING, "Backup %s has status %s. Skip validation.", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); corrupted_backup_found = true; return; } @@ -98,28 +98,28 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) backup->status == BACKUP_STATUS_MERGING) { elog(WARNING, "Full backup %s has status %s, skip validation", - base36enc(backup->start_time), status2str(backup->status)); + backup_id_of(backup), status2str(backup->status)); return; } if (backup->status == BACKUP_STATUS_OK || backup->status == BACKUP_STATUS_DONE || backup->status == BACKUP_STATUS_MERGING) - elog(INFO, "Validating backup %s", base36enc(backup->start_time)); + elog(INFO, "Validating backup %s", backup_id_of(backup)); else - elog(INFO, "Revalidating backup %s", base36enc(backup->start_time)); + elog(INFO, "Revalidating backup %s", backup_id_of(backup)); if (backup->backup_mode != BACKUP_MODE_FULL && backup->backup_mode != BACKUP_MODE_DIFF_PAGE && backup->backup_mode != BACKUP_MODE_DIFF_PTRACK && backup->backup_mode != BACKUP_MODE_DIFF_DELTA) - elog(WARNING, "Invalid backup_mode of backup %s", base36enc(backup->start_time)); + elog(WARNING, "Invalid backup_mode of backup %s", backup_id_of(backup)); join_path_components(external_prefix, backup->root_dir, EXTERNAL_DIR); files = get_backup_filelist(backup, false); if (!files) { - elog(WARNING, "Backup %s file list is corrupted", base36enc(backup->start_time)); + elog(WARNING, "Backup %s file list is corrupted", backup_id_of(backup)); backup->status = BACKUP_STATUS_CORRUPT; write_backup_status(backup, BACKUP_STATUS_CORRUPT, true); return; @@ -189,9 +189,9 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) BACKUP_STATUS_OK, true); if (corrupted) - elog(WARNING, "Backup %s data files are corrupted", base36enc(backup->start_time)); + elog(WARNING, "Backup %s data files are corrupted", backup_id_of(backup)); else - elog(INFO, "Backup %s data files are valid", base36enc(backup->start_time)); + elog(INFO, "Backup %s data files are valid", backup_id_of(backup)); /* Issue #132 kludge */ if (!corrupted && @@ -208,7 +208,7 @@ pgBackupValidate(pgBackup *backup, pgRestoreParams *params) elog(WARNING, "Backup %s is a victim of metadata corruption. 
" "Additional information can be found here: " "https://github.com/postgrespro/pg_probackup/issues/132", - base36enc(backup->start_time)); + backup_id_of(backup)); backup->status = BACKUP_STATUS_CORRUPT; write_backup_status(backup, BACKUP_STATUS_CORRUPT, true); } @@ -394,15 +394,13 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) /* open directory and list contents */ dir = opendir(catalogState->backup_subdir_path); if (dir == NULL) - elog(ERROR, "cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno)); + elog(ERROR, "Cannot open directory \"%s\": %s", catalogState->backup_subdir_path, strerror(errno)); errno = 0; while ((dent = readdir(dir))) { char child[MAXPGPATH]; struct stat st; - InstanceState *instanceState; - /* skip entries point current dir or parent dir */ if (strcmp(dent->d_name, ".") == 0 || @@ -412,7 +410,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) join_path_components(child, catalogState->backup_subdir_path, dent->d_name); if (lstat(child, &st) == -1) - elog(ERROR, "cannot stat file \"%s\": %s", child, strerror(errno)); + elog(ERROR, "Cannot stat file \"%s\": %s", child, strerror(errno)); if (!S_ISDIR(st.st_mode)) continue; @@ -420,7 +418,7 @@ do_validate_all(CatalogState *catalogState, InstanceState *instanceState) /* * Initialize instance configuration. */ - instanceState = pgut_new(InstanceState); + instanceState = pgut_new(InstanceState); /* memory leak */ strncpy(instanceState->instance_name, dent->d_name, MAXPGPATH); join_path_components(instanceState->instance_backup_subdir_path, @@ -503,10 +501,12 @@ do_validate_instance(InstanceState *instanceState) /* chain is broken */ if (result == ChainIsBroken) { - char *parent_backup_id; + const char *parent_backup_id; + const char *current_backup_id; /* determine missing backup ID */ - parent_backup_id = base36enc_dup(tmp_backup->parent_backup); + parent_backup_id = base36enc(tmp_backup->parent_backup); + current_backup_id = backup_id_of(current_backup); corrupted_backup_found = true; /* orphanize current_backup */ @@ -515,15 +515,13 @@ do_validate_instance(InstanceState *instanceState) { write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s is missing", - base36enc(current_backup->start_time), - parent_backup_id); + current_backup_id, parent_backup_id); } else { elog(WARNING, "Backup %s has missing parent %s", - base36enc(current_backup->start_time), parent_backup_id); + current_backup_id, parent_backup_id); } - pg_free(parent_backup_id); continue; } /* chain is whole, but at least one parent is invalid */ @@ -532,23 +530,23 @@ do_validate_instance(InstanceState *instanceState) /* Oldest corrupt backup has a chance for revalidation */ if (current_backup->start_time != tmp_backup->start_time) { - char *backup_id = base36enc_dup(tmp_backup->start_time); /* orphanize current_backup */ if (current_backup->status == BACKUP_STATUS_OK || current_backup->status == BACKUP_STATUS_DONE) { write_backup_status(current_backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", - base36enc(current_backup->start_time), backup_id, + backup_id_of(current_backup), + backup_id_of(tmp_backup), status2str(tmp_backup->status)); } else { elog(WARNING, "Backup %s has parent %s with status: %s", - base36enc(current_backup->start_time), backup_id, + backup_id_of(current_backup), + backup_id_of(tmp_backup), status2str(tmp_backup->status)); 
} - pg_free(backup_id); continue; } base_full_backup = find_parent_full_backup(current_backup); @@ -556,7 +554,7 @@ do_validate_instance(InstanceState *instanceState) /* sanity */ if (!base_full_backup) elog(ERROR, "Parent full backup for the given backup %s was not found", - base36enc(current_backup->start_time)); + backup_id_of(current_backup)); } /* chain is whole, all parents are valid at first glance, * current backup validation can proceed @@ -571,7 +569,7 @@ do_validate_instance(InstanceState *instanceState) if (!lock_backup(current_backup, true, false)) { elog(WARNING, "Cannot lock backup %s directory, skip validation", - base36enc(current_backup->start_time)); + backup_id_of(current_backup)); skipped_due_to_lock = true; continue; } @@ -589,7 +587,6 @@ do_validate_instance(InstanceState *instanceState) */ if (current_backup->status != BACKUP_STATUS_OK) { - char *current_backup_id; /* This is ridiculous but legal. * PAGE_b2 <- OK * PAGE_a2 <- OK @@ -599,7 +596,6 @@ do_validate_instance(InstanceState *instanceState) */ corrupted_backup_found = true; - current_backup_id = base36enc_dup(current_backup->start_time); for (j = i - 1; j >= 0; j--) { @@ -613,13 +609,12 @@ do_validate_instance(InstanceState *instanceState) write_backup_status(backup, BACKUP_STATUS_ORPHAN, true); elog(WARNING, "Backup %s is orphaned because his parent %s has status: %s", - base36enc(backup->start_time), - current_backup_id, + backup_id_of(backup), + backup_id_of(current_backup), status2str(current_backup->status)); } } } - free(current_backup_id); } /* For every OK backup we try to revalidate all his ORPHAN descendants. */ @@ -666,7 +661,7 @@ do_validate_instance(InstanceState *instanceState) if (!lock_backup(backup, true, false)) { elog(WARNING, "Cannot lock backup %s directory, skip validation", - base36enc(backup->start_time)); + backup_id_of(backup)); skipped_due_to_lock = true; continue; } @@ -737,7 +732,7 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) if (!fileExists(map_path, FIO_BACKUP_HOST)) elog(ERROR, "Tablespace map is missing: \"%s\", " "probably backup %s is corrupt, validate it", - map_path, base36enc(backup->backup_id)); + map_path, backup_id_of(backup)); /* check tablespace map checksumms */ if (!no_validate) @@ -747,7 +742,7 @@ validate_tablespace_map(pgBackup *backup, bool no_validate) if ((*tablespace_map)->crc != crc) elog(ERROR, "Invalid CRC of tablespace map file \"%s\" : %X. 
Expected %X, " "probably backup %s is corrupt, validate it", - map_path, crc, (*tablespace_map)->crc, base36enc(backup->backup_id)); + map_path, crc, (*tablespace_map)->crc, backup_id_of(backup)); } pgFileFree(dummy); diff --git a/tests/CVE_2018_1058.py b/tests/CVE_2018_1058_test.py similarity index 85% rename from tests/CVE_2018_1058.py rename to tests/CVE_2018_1058_test.py index 3da41f116..cfd55cc60 100644 --- a/tests/CVE_2018_1058.py +++ b/tests/CVE_2018_1058_test.py @@ -2,17 +2,14 @@ import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'CVE-2018-1058' - class CVE_2018_1058(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_basic_default_search_path(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True) self.init_pb(backup_dir) @@ -31,16 +28,12 @@ def test_basic_default_search_path(self): self.backup_node(backup_dir, 'node', node, backup_type='full', options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_backup_modified_search_path(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True) self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) @@ -77,15 +70,11 @@ def test_basic_backup_modified_search_path(self): self.assertFalse( 'pg_probackup vulnerable!' in log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_checkdb_modified_search_path(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.set_auto_conf(node, options={'search_path': 'public,pg_catalog'}) node.slow_start() @@ -138,6 +127,3 @@ def test_basic_checkdb_modified_search_path(self): e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/Readme.md b/tests/Readme.md index 668552c94..11c5272f9 100644 --- a/tests/Readme.md +++ b/tests/Readme.md @@ -1,4 +1,4 @@ -[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup) +****[see wiki](https://confluence.postgrespro.ru/display/DEV/pg_probackup) ``` Note: For now these tests work on Linux and "kinda" work on Windows @@ -31,7 +31,7 @@ Remote backup depends on key authentication to local machine via ssh as current export PGPROBACKUP_SSH_REMOTE=ON Run tests that are relied on advanced debugging features. For this mode, pg_probackup should be compiled without optimizations. 
For example: -CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests +CFLAGS="-O0" ./configure --prefix=/path/to/prefix --enable-debug --enable-cassert --enable-depend --enable-tap-tests --enable-nls export PGPROBACKUP_GDB=ON @@ -41,6 +41,8 @@ Run suit of basic simple tests: Run ptrack tests: export PG_PROBACKUP_PTRACK=ON +Run long (time consuming) tests: + export PG_PROBACKUP_LONG=ON Usage: sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope @@ -48,3 +50,20 @@ Usage: export PG_CONFIG=/path/to/pg_config python -m unittest [-v] tests[.specific_module][.class.test] ``` + +# Troubleshooting FAQ + +## Python tests failure +### 1. Could not open extension "..." +``` +testgres.exceptions.QueryException ERROR: could not open extension control file "/share/extension/amcheck.control": No such file or directory +``` + +#### Solution: + +You have no `/contrib/...` extension installed, please do + +```commandline +cd +make install-world +``` diff --git a/tests/__init__.py b/tests/__init__.py index 55d6ea9be..c8d2c70c3 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,13 +1,13 @@ import unittest import os -from . import init, merge, option, show, compatibility, \ - backup, delete, delta, restore, validate, \ - retention, pgpro560, pgpro589, pgpro2068, false_positive, replica, \ - compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \ - cfs_validate_backup, auth_test, time_stamp, logging, \ - locking, remote, external, config, checkdb, set_backup, incr_restore, \ - catchup, CVE_2018_1058 +from . import init_test, merge_test, option_test, show_test, compatibility_test, \ + backup_test, delete_test, delta_test, restore_test, validate_test, \ + retention_test, pgpro560_test, pgpro589_test, pgpro2068_test, false_positive_test, replica_test, \ + compression_test, page_test, ptrack_test, archive_test, exclude_test, cfs_backup_test, cfs_restore_test, \ + cfs_validate_backup_test, auth_test, time_stamp_test, logging_test, \ + locking_test, remote_test, external_test, config_test, checkdb_test, set_backup_test, incr_restore_test, \ + catchup_test, CVE_2018_1058_test, time_consuming_test def load_tests(loader, tests, pattern): @@ -19,44 +19,50 @@ def load_tests(loader, tests, pattern): if 'PG_PROBACKUP_PTRACK' in os.environ: if os.environ['PG_PROBACKUP_PTRACK'] == 'ON': - suite.addTests(loader.loadTestsFromModule(ptrack)) + suite.addTests(loader.loadTestsFromModule(ptrack_test)) -# suite.addTests(loader.loadTestsFromModule(auth_test)) - suite.addTests(loader.loadTestsFromModule(archive)) - suite.addTests(loader.loadTestsFromModule(backup)) - suite.addTests(loader.loadTestsFromModule(catchup)) + # PG_PROBACKUP_LONG section for tests that are long + # by design e.g. 
they contain loops, sleeps and so on + if 'PG_PROBACKUP_LONG' in os.environ: + if os.environ['PG_PROBACKUP_LONG'] == 'ON': + suite.addTests(loader.loadTestsFromModule(time_consuming_test)) + + suite.addTests(loader.loadTestsFromModule(auth_test)) + suite.addTests(loader.loadTestsFromModule(archive_test)) + suite.addTests(loader.loadTestsFromModule(backup_test)) + suite.addTests(loader.loadTestsFromModule(catchup_test)) if 'PGPROBACKUPBIN_OLD' in os.environ and os.environ['PGPROBACKUPBIN_OLD']: - suite.addTests(loader.loadTestsFromModule(compatibility)) - suite.addTests(loader.loadTestsFromModule(checkdb)) - suite.addTests(loader.loadTestsFromModule(config)) -# suite.addTests(loader.loadTestsFromModule(cfs_backup)) -# suite.addTests(loader.loadTestsFromModule(cfs_restore)) -# suite.addTests(loader.loadTestsFromModule(cfs_validate_backup)) - suite.addTests(loader.loadTestsFromModule(compression)) - suite.addTests(loader.loadTestsFromModule(delete)) - suite.addTests(loader.loadTestsFromModule(delta)) - suite.addTests(loader.loadTestsFromModule(exclude)) - suite.addTests(loader.loadTestsFromModule(external)) - suite.addTests(loader.loadTestsFromModule(false_positive)) - suite.addTests(loader.loadTestsFromModule(init)) - suite.addTests(loader.loadTestsFromModule(incr_restore)) - suite.addTests(loader.loadTestsFromModule(locking)) - suite.addTests(loader.loadTestsFromModule(logging)) - suite.addTests(loader.loadTestsFromModule(merge)) - suite.addTests(loader.loadTestsFromModule(option)) - suite.addTests(loader.loadTestsFromModule(page)) - suite.addTests(loader.loadTestsFromModule(pgpro560)) - suite.addTests(loader.loadTestsFromModule(pgpro589)) - suite.addTests(loader.loadTestsFromModule(pgpro2068)) - suite.addTests(loader.loadTestsFromModule(remote)) - suite.addTests(loader.loadTestsFromModule(replica)) - suite.addTests(loader.loadTestsFromModule(restore)) - suite.addTests(loader.loadTestsFromModule(retention)) - suite.addTests(loader.loadTestsFromModule(set_backup)) - suite.addTests(loader.loadTestsFromModule(show)) - suite.addTests(loader.loadTestsFromModule(time_stamp)) - suite.addTests(loader.loadTestsFromModule(validate)) - suite.addTests(loader.loadTestsFromModule(CVE_2018_1058)) + suite.addTests(loader.loadTestsFromModule(compatibility_test)) + suite.addTests(loader.loadTestsFromModule(checkdb_test)) + suite.addTests(loader.loadTestsFromModule(config_test)) + suite.addTests(loader.loadTestsFromModule(cfs_backup_test)) + suite.addTests(loader.loadTestsFromModule(cfs_restore_test)) + suite.addTests(loader.loadTestsFromModule(cfs_validate_backup_test)) + suite.addTests(loader.loadTestsFromModule(compression_test)) + suite.addTests(loader.loadTestsFromModule(delete_test)) + suite.addTests(loader.loadTestsFromModule(delta_test)) + suite.addTests(loader.loadTestsFromModule(exclude_test)) + suite.addTests(loader.loadTestsFromModule(external_test)) + suite.addTests(loader.loadTestsFromModule(false_positive_test)) + suite.addTests(loader.loadTestsFromModule(init_test)) + suite.addTests(loader.loadTestsFromModule(incr_restore_test)) + suite.addTests(loader.loadTestsFromModule(locking_test)) + suite.addTests(loader.loadTestsFromModule(logging_test)) + suite.addTests(loader.loadTestsFromModule(merge_test)) + suite.addTests(loader.loadTestsFromModule(option_test)) + suite.addTests(loader.loadTestsFromModule(page_test)) + suite.addTests(loader.loadTestsFromModule(pgpro560_test)) + suite.addTests(loader.loadTestsFromModule(pgpro589_test)) + suite.addTests(loader.loadTestsFromModule(pgpro2068_test)) + 
suite.addTests(loader.loadTestsFromModule(remote_test)) + suite.addTests(loader.loadTestsFromModule(replica_test)) + suite.addTests(loader.loadTestsFromModule(restore_test)) + suite.addTests(loader.loadTestsFromModule(retention_test)) + suite.addTests(loader.loadTestsFromModule(set_backup_test)) + suite.addTests(loader.loadTestsFromModule(show_test)) + suite.addTests(loader.loadTestsFromModule(time_stamp_test)) + suite.addTests(loader.loadTestsFromModule(validate_test)) + suite.addTests(loader.loadTestsFromModule(CVE_2018_1058_test)) return suite diff --git a/tests/archive.py b/tests/archive_test.py similarity index 86% rename from tests/archive.py rename to tests/archive_test.py index 22b9d8693..00fd1f592 100644 --- a/tests/archive.py +++ b/tests/archive_test.py @@ -3,6 +3,7 @@ import gzip import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, GdbException +from .helpers.data_helpers import tail_file from datetime import datetime, timedelta import subprocess from sys import exit @@ -10,19 +11,15 @@ from distutils.dir_util import copy_tree -module_name = 'archive' - - class ArchiveTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure # @unittest.skip("skip") def test_pgpro434_1(self): """Description in jira issue PGPRO-434""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -39,7 +36,7 @@ def test_pgpro434_1(self): "md5(repeat(i::text,10))::tsvector as tsvector from " "generate_series(0,100) i") - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node) node.cleanup() @@ -62,10 +59,8 @@ def test_pgpro434_1(self): node.slow_start() self.assertEqual( - result, node.safe_psql("postgres", "SELECT * FROM t_heap"), + result, node.table_checksum("t_heap"), 'data after restore not equal to original data') - # Clean after yourself - self.del_test_dir(module_name, fname) # @unittest.skip("skip") # @unittest.expectedFailure @@ -74,10 +69,9 @@ def test_pgpro434_2(self): Check that timelines are correct. 
WAITING PGPRO-1053 for --immediate """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -85,8 +79,7 @@ def test_pgpro434_2(self): ) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') self.init_pb(backup_dir) @@ -160,7 +153,7 @@ def test_pgpro434_2(self): backup_id = self.backup_node(backup_dir, 'node', node) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") node.safe_psql( "postgres", "insert into t_heap select 100503 as id, md5(i::text) as text, " @@ -212,26 +205,20 @@ def test_pgpro434_2(self): "select exists(select 1 from t_heap where id > 100500)")[0][0], 'data after restore not equal to original data') - self.assertEqual( - result, - node.safe_psql( - "postgres", - "SELECT * FROM t_heap"), + self.assertEqual(result, node.table_checksum("t_heap"), 'data after restore not equal to original data') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pgpro434_3(self): """ Check pg_stop_backup_timeout, needed backup_timeout Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -248,6 +235,7 @@ def test_pgpro434_3(self): "--log-level-file=LOG"], gdb=True) + # Attention! this breakpoint has been set on internal probackup function, not on a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() @@ -281,19 +269,17 @@ def test_pgpro434_3(self): log_content, 'PostgreSQL crashed because of a failed assert') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pgpro434_4(self): """ Check pg_stop_backup_timeout, libpq-timeout requested. Fixed in commit d84d79668b0c139 and assert fixed by ptrack 1.7 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -310,10 +296,11 @@ def test_pgpro434_4(self): "--log-level-file=info"], gdb=True) + # Attention! 
this breakpoint has been set on internal probackup function, not on a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() - self.set_auto_conf(node, {'archive_command': "'exit 1'"}) + self.set_auto_conf(node, {'archive_command': 'exit 1'}) node.reload() os.environ["PGAPPNAME"] = "foo" @@ -327,7 +314,10 @@ def test_pgpro434_4(self): os.environ["PGAPPNAME"] = "pg_probackup" postgres_gdb = self.gdb_attach(pid) - postgres_gdb.set_breakpoint('do_pg_stop_backup') + if self.get_version(node) < 150000: + postgres_gdb.set_breakpoint('do_pg_stop_backup') + else: + postgres_gdb.set_breakpoint('do_pg_backup_stop') postgres_gdb.continue_execution_until_running() gdb.continue_execution_until_exit() @@ -337,9 +327,14 @@ def test_pgpro434_4(self): with open(log_file, 'r') as f: log_content = f.read() - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", + log_content) + else: + self.assertIn( + "ERROR: pg_backup_stop doesn't answer in 60 seconds, cancel it", + log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: @@ -350,16 +345,12 @@ def test_pgpro434_4(self): log_content, 'PostgreSQL crashed because of a failed assert') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_push_file_exists(self): """Archive-push if file exists""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -393,26 +384,31 @@ def test_archive_push_file_exists(self): self.switch_wal_segment(node) sleep(1) - with open(log_file, 'r') as f: - log_content = f.read() + log = tail_file(log_file, linetimeout=30, totaltimeout=120, + collect=True) + log.wait(contains = 'The failed archive command was') + self.assertIn( 'LOG: archive command failed with exit code 1', - log_content) + log.content) self.assertIn( 'DETAIL: The failed archive command was:', - log_content) + log.content) self.assertIn( 'pg_probackup archive-push WAL file', - log_content) + log.content) self.assertIn( 'WAL file already exists in archive with different checksum', - log_content) + log.content) self.assertNotIn( - 'pg_probackup archive-push completed successfully', log_content) + 'pg_probackup archive-push completed successfully', log.content) + + # btw check that console coloring codes are not slipped into log file + self.assertNotIn('[0m', log.content) if self.get_version(node) < 100000: wal_src = os.path.join( @@ -429,30 +425,16 @@ def test_archive_push_file_exists(self): shutil.copyfile(wal_src, file) self.switch_wal_segment(node) - sleep(5) - - with open(log_file, 'r') as f: - log_content = f.read() - - self.assertIn( - 'pg_probackup archive-push completed successfully', - log_content) - - # btw check that console coloring codes are not slipped into log file - self.assertNotIn('[0m', log_content) - - print(log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) + log.stop_collect() + log.wait(contains = 'pg_probackup archive-push completed successfully') # @unittest.skip("skip") def 
test_archive_push_file_exists_overwrite(self): """Archive-push if file exists""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'checkpoint_timeout': '30s'}) @@ -485,50 +467,42 @@ def test_archive_push_file_exists_overwrite(self): self.switch_wal_segment(node) sleep(1) - with open(log_file, 'r') as f: - log_content = f.read() + log = tail_file(log_file, linetimeout=30, collect=True) + log.wait(contains = 'The failed archive command was') self.assertIn( - 'LOG: archive command failed with exit code 1', log_content) + 'LOG: archive command failed with exit code 1', log.content) self.assertIn( - 'DETAIL: The failed archive command was:', log_content) + 'DETAIL: The failed archive command was:', log.content) self.assertIn( - 'pg_probackup archive-push WAL file', log_content) + 'pg_probackup archive-push WAL file', log.content) self.assertNotIn( 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) + 'different checksum, overwriting', log.content) self.assertIn( 'WAL file already exists in archive with ' - 'different checksum', log_content) + 'different checksum', log.content) self.assertNotIn( - 'pg_probackup archive-push completed successfully', log_content) + 'pg_probackup archive-push completed successfully', log.content) self.set_archiving(backup_dir, 'node', node, overwrite=True) node.reload() self.switch_wal_segment(node) - sleep(5) - with open(log_file, 'r') as f: - log_content = f.read() - self.assertTrue( - 'pg_probackup archive-push completed successfully' in log_content, - 'Expecting messages about successfull execution archive_command') + log.drop_content() + log.wait(contains = 'pg_probackup archive-push completed successfully') self.assertIn( 'WAL file already exists in archive with ' - 'different checksum, overwriting', log_content) - - # Clean after yourself - self.del_test_dir(module_name, fname) + 'different checksum, overwriting', log.content) # @unittest.skip("skip") def test_archive_push_partial_file_exists(self): """Archive-push if stale '.part' file exists""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -602,16 +576,12 @@ def test_archive_push_partial_file_exists(self): 'Reusing stale temp WAL file', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_push_part_file_exists_not_stale(self): """Archive-push if .part file exists and it is not stale""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -682,9 +652,6 @@ def 
test_archive_push_part_file_exists_not_stale(self): # 'is not stale', # log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_replica_archive(self): @@ -693,10 +660,9 @@ def test_replica_archive(self): turn it into replica, set replica with archiving, make archive backup from replica """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -705,8 +671,7 @@ def test_replica_archive(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -715,7 +680,7 @@ def test_replica_archive(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() master.psql( @@ -725,7 +690,7 @@ def test_replica_archive(self): "from generate_series(0,2560) i") self.backup_node(backup_dir, 'master', master, options=['--stream']) - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") # Settings for Replica self.restore_node(backup_dir, 'master', replica) @@ -736,7 +701,7 @@ def test_replica_archive(self): replica.slow_start(replica=True) # Check data correctness on replica - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, take FULL backup from replica, @@ -747,7 +712,7 @@ def test_replica_archive(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'replica', replica, @@ -764,14 +729,14 @@ def test_replica_archive(self): # RESTORE FULL BACKUP TAKEN FROM replica node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) self.set_auto_conf(node, {'port': node.port}) node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, make PAGE backup from replica, @@ -783,7 +748,7 @@ def test_replica_archive(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,80680) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") self.wait_until_replica_catch_with_master(master, replica) @@ -810,12 +775,9 @@ def test_replica_archive(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) - # Clean after yourself - 
self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_master_and_replica_parallel_archiving(self): @@ -825,10 +787,9 @@ def test_master_and_replica_parallel_archiving(self): set replica with archiving, make archive backup from replica, make archive backup from master """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -836,12 +797,11 @@ def test_master_and_replica_parallel_archiving(self): ) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.init_pb(backup_dir) @@ -859,7 +819,7 @@ def test_master_and_replica_parallel_archiving(self): # TAKE FULL ARCHIVE BACKUP FROM MASTER self.backup_node(backup_dir, 'master', master) # GET LOGICAL CONTENT FROM MASTER - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") # GET PHYSICAL CONTENT FROM MASTER pgdata_master = self.pgdata_content(master.data_dir) @@ -877,7 +837,7 @@ def test_master_and_replica_parallel_archiving(self): replica.slow_start(replica=True) # CHECK LOGICAL CORRECTNESS on REPLICA - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) master.psql( @@ -905,9 +865,6 @@ def test_master_and_replica_parallel_archiving(self): self.assertEqual( 'OK', self.show_pb(backup_dir, 'master', backup_id)['status']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_basic_master_and_replica_concurrent_archiving(self): @@ -918,12 +875,11 @@ def test_basic_master_and_replica_concurrent_archiving(self): make sure that archiving on both node is working. 
""" if self.pg_config_version < self.version_to_num('9.6.0'): - return unittest.skip('You need PostgreSQL >= 9.6 for this test') + self.skipTest('You need PostgreSQL >= 9.6 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -931,12 +887,11 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.init_pb(backup_dir) @@ -956,7 +911,7 @@ def test_basic_master_and_replica_concurrent_archiving(self): # TAKE FULL ARCHIVE BACKUP FROM MASTER self.backup_node(backup_dir, 'master', master) # GET LOGICAL CONTENT FROM MASTER - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") # GET PHYSICAL CONTENT FROM MASTER pgdata_master = self.pgdata_content(master.data_dir) @@ -975,7 +930,7 @@ def test_basic_master_and_replica_concurrent_archiving(self): replica.slow_start(replica=True) # CHECK LOGICAL CORRECTNESS on REPLICA - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) master.psql( @@ -1009,10 +964,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): self.backup_node(backup_dir, 'master', master) self.backup_node(backup_dir, 'master', replica) - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.expectedFailure # @unittest.skip("skip") def test_concurrent_archiving(self): @@ -1024,12 +975,11 @@ def test_concurrent_archiving(self): """ if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) @@ -1045,7 +995,7 @@ def test_concurrent_archiving(self): # Settings for Replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -1056,7 +1006,7 @@ def test_concurrent_archiving(self): # create cascade replicas replica1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica1')) + base_dir=os.path.join(self.module_name, self.fname, 'replica1')) replica1.cleanup() # Settings for casaced replica @@ -1092,17 +1042,13 @@ def test_concurrent_archiving(self): log_content = f.read() self.assertNotIn('different checksum', log_content) - # Clean after 
yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_pg_receivexlog(self): """Test backup with pg_receivexlog wal delivary method""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1149,7 +1095,7 @@ def test_archive_pg_receivexlog(self): node, backup_type='page' ) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") self.validate_pb(backup_dir) # Check data correctness @@ -1159,23 +1105,19 @@ def test_archive_pg_receivexlog(self): self.assertEqual( result, - node.safe_psql( - "postgres", "SELECT * FROM t_heap" - ), + node.table_checksum("t_heap"), 'data after restore not equal to original data') # Clean after yourself pg_receivexlog.kill() - self.del_test_dir(module_name, fname) # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_pg_receivexlog_compression_pg10(self): """Test backup with pg_receivewal compressed wal delivary method""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1185,7 +1127,7 @@ def test_archive_pg_receivexlog_compression_pg10(self): self.add_instance(backup_dir, 'node', node) node.slow_start() if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') + self.skipTest('You need PostgreSQL >= 10 for this test') else: pg_receivexlog_path = self.get_bin_path('pg_receivewal') @@ -1220,7 +1162,7 @@ def test_archive_pg_receivexlog_compression_pg10(self): backup_dir, 'node', node, backup_type='page' ) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") self.validate_pb(backup_dir) # Check data correctness @@ -1229,12 +1171,11 @@ def test_archive_pg_receivexlog_compression_pg10(self): node.slow_start() self.assertEqual( - result, node.safe_psql("postgres", "SELECT * FROM t_heap"), + result, node.table_checksum("t_heap"), 'data after restore not equal to original data') # Clean after yourself pg_receivexlog.kill() - self.del_test_dir(module_name, fname) # @unittest.expectedFailure # @unittest.skip("skip") @@ -1255,10 +1196,9 @@ def test_archive_catalog(self): ARCHIVE master: t1 -Z1--Z2--- """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1266,8 +1206,7 @@ def test_archive_catalog(self): 'checkpoint_timeout': '30s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 
'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1296,7 +1235,7 @@ def test_archive_catalog(self): backup_dir, 'master', master, backup_type='page') replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) self.set_replica(master, replica) @@ -1565,8 +1504,6 @@ def test_archive_catalog(self): self.assertEqual(timeline_2['parent-tli'], 1) self.assertEqual(timeline_1['parent-tli'], 0) - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_catalog_1(self): @@ -1577,10 +1514,9 @@ def test_archive_catalog_1(self): self.skipTest('You need to enable ARCHIVE_COMPRESSION ' 'for this test to run') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1620,8 +1556,6 @@ def test_archive_catalog_1(self): '000000010000000000000001') self.assertEqual(timeline['status'], 'OK') - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_catalog_2(self): @@ -1632,10 +1566,9 @@ def test_archive_catalog_2(self): self.skipTest('You need to enable ARCHIVE_COMPRESSION ' 'for this test to run') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1677,8 +1610,6 @@ def test_archive_catalog_2(self): '000000010000000000000002') self.assertEqual(timeline['status'], 'OK') - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_options(self): @@ -1689,10 +1620,9 @@ def test_archive_options(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1755,8 +1685,6 @@ def test_archive_options(self): 'postgres', 'select 1') - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_options_1(self): @@ -1764,10 +1692,9 @@ def test_archive_options_1(self): check that '--archive-host', '--archive-user', '--archiver-port' and '--restore-command' are working as expected with set-config """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1826,8 +1753,6 @@ def test_archive_options_1(self): self.probackup_path, backup_dir, 'node', self.user), recovery_content) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_undefined_wal_file_path(self): @@ -1835,10 +1760,9 @@ def test_undefined_wal_file_path(self): check that archive-push works correct with undefined --wal-file-path """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1868,19 +1792,16 @@ def test_undefined_wal_file_path(self): # check self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], '000000010000000000000001') - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_intermediate_archiving(self): """ check that archive-push works correct with --wal-file-path setting by user """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) node_pg_options = {} @@ -1893,7 +1814,7 @@ def test_intermediate_archiving(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) - wal_dir = os.path.join(self.tmp_path, module_name, fname, 'intermediate_dir') + wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'intermediate_dir') shutil.rmtree(wal_dir, ignore_errors=True) os.makedirs(wal_dir) if os.name == 'posix': @@ -1918,8 +1839,6 @@ def test_intermediate_archiving(self): self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], wal_segment) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_waldir_outside_pgdata_archiving(self): @@ -1927,16 +1846,15 @@ def test_waldir_outside_pgdata_archiving(self): check that archive-push works correct with symlinked waldir """ if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip( + self.skipTest( 'Skipped because waldir outside pgdata is supported since PG 10') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - external_wal_dir = os.path.join(self.tmp_path, module_name, fname, 'ext_wal_dir') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + external_wal_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'ext_wal_dir') shutil.rmtree(external_wal_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums', '--waldir={0}'.format(external_wal_dir)]) self.init_pb(backup_dir) @@ -1953,18 +1871,15 @@ def test_waldir_outside_pgdata_archiving(self): # check self.assertEqual(self.show_archive(backup_dir, instance='node', tli=1)['min-segno'], 
'000000010000000000000001') - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_hexadecimal_timeline(self): """ Check that timelines are correct. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2012,9 +1927,6 @@ def test_hexadecimal_timeline(self): '0000000D000000000000001C', tli13['max-segno']) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_archiving_and_slots(self): @@ -2022,10 +1934,9 @@ def test_archiving_and_slots(self): Check that archiving don`t break slot guarantee. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -2076,15 +1987,11 @@ def test_archiving_and_slots(self): exit(1) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_archive_push_sanity(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -2106,7 +2013,7 @@ def test_archive_push_sanity(self): self.backup_node(backup_dir, 'node', node) with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() + postgres_log_content = cleanup_ptrack(f.read()) # print(postgres_log_content) # make sure that .backup file is not compressed @@ -2114,14 +2021,14 @@ def test_archive_push_sanity(self): self.assertNotIn('WARNING', postgres_log_content) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node( backup_dir, 'node', replica, data_dir=replica.data_dir, options=['-R']) - #self.set_archiving(backup_dir, 'replica', replica, replica=True) + # self.set_archiving(backup_dir, 'replica', replica, replica=True) self.set_auto_conf(replica, {'port': replica.port}) self.set_auto_conf(replica, {'archive_mode': 'always'}) self.set_auto_conf(replica, {'hot_standby': 'on'}) @@ -2134,14 +2041,22 @@ def test_archive_push_sanity(self): replica.promote() replica.pgbench_init(scale=10) - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - replica_log_content = f.read() + log = tail_file(os.path.join(replica.logs_dir, 'postgresql.log'), + collect=True) + log.wait(regex=r"pushing file.*history") + log.wait(contains='archive-push completed successfully') + log.wait(regex=r"pushing file.*partial") + log.wait(contains='archive-push completed successfully') # make sure that .partial file is not compressed - self.assertNotIn('.partial.gz', 
replica_log_content) + self.assertNotIn('.partial.gz', log.content) # make sure that .history file is not compressed - self.assertNotIn('.history.gz', replica_log_content) - self.assertNotIn('WARNING', replica_log_content) + self.assertNotIn('.history.gz', log.content) + + replica.stop() + log.wait_shutdown() + + self.assertNotIn('WARNING', cleanup_ptrack(log.content)) output = self.show_archive( backup_dir, 'node', as_json=False, as_text=True, @@ -2149,23 +2064,18 @@ def test_archive_push_sanity(self): self.assertNotIn('WARNING', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_pg_receivexlog_partial_handling(self): """check that archive-get delivers .partial and .gz.partial files""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -2223,7 +2133,7 @@ def test_archive_pg_receivexlog_partial_handling(self): pg_receivexlog.kill() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2234,26 +2144,17 @@ def test_archive_pg_receivexlog_partial_handling(self): node_restored.slow_start() - result = node.safe_psql( - "postgres", - "select sum(id) from t_heap").decode('utf-8').rstrip() - - result_new = node_restored.safe_psql( - "postgres", - "select sum(id) from t_heap").decode('utf-8').rstrip() + result = node.table_checksum("t_heap") + result_new = node_restored.table_checksum("t_heap") self.assertEqual(result, result_new) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_multi_timeline_recovery_prefetching(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2303,9 +2204,7 @@ def test_multi_timeline_recovery_prefetching(self): node.slow_start() node.pgbench_init(scale=20) - result = node.safe_psql( - 'postgres', - 'select * from pgbench_accounts') + result = node.table_checksum("pgbench_accounts") node.stop() node.cleanup() @@ -2330,9 +2229,7 @@ def test_multi_timeline_recovery_prefetching(self): node.slow_start() - result_new = node.safe_psql( - 'postgres', - 'select * from pgbench_accounts') + result_new = node.table_checksum("pgbench_accounts") self.assertEqual(result, result_new) @@ -2356,25 +2253,20 @@ def test_multi_timeline_recovery_prefetching(self): 'WAL segment 000000010000000000000006, prefetch state: 5/10', postgres_log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_archive_get_batching_sanity(self): """ Make sure that 
batching works. .gz file is corrupted and uncompressed is not, check that both corruption detected and uncompressed file is used. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -2388,7 +2280,7 @@ def test_archive_get_batching_sanity(self): node.pgbench_init(scale=50) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node( @@ -2429,18 +2321,14 @@ def test_archive_get_batching_sanity(self): self.assertIn('prefetch state: 9/10', postgres_log_content) self.assertIn('prefetch state: 8/10', postgres_log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_archive_get_prefetch_corruption(self): """ Make sure that WAL corruption is detected. And --prefetch-dir is honored. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2455,7 +2343,7 @@ def test_archive_get_prefetch_corruption(self): node.pgbench_init(scale=50) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node( @@ -2552,21 +2440,11 @@ def test_archive_get_prefetch_corruption(self): os.remove(os.path.join(replica.logs_dir, 'postgresql.log')) replica.slow_start(replica=True) - sleep(60) - - with open(os.path.join(replica.logs_dir, 'postgresql.log'), 'r') as f: - postgres_log_content = f.read() - - self.assertIn( - 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename), - postgres_log_content) - - self.assertIn( - 'LOG: restored log file "{0}" from archive'.format(filename), - postgres_log_content) - - # Clean after yourself - self.del_test_dir(module_name, fname) + prefetch_line = 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename) + restored_line = 'LOG: restored log file "{0}" from archive'.format(filename) + tailer = tail_file(os.path.join(replica.logs_dir, 'postgresql.log')) + tailer.wait(contains=prefetch_line) + tailer.wait(contains=restored_line) # @unittest.skip("skip") def test_archive_show_partial_files_handling(self): @@ -2574,10 +2452,9 @@ def test_archive_show_partial_files_handling(self): check that files with '.part', '.part.gz', '.partial' and '.partial.gz' siffixes are handled correctly """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2696,19 +2573,15 @@ def test_archive_show_partial_files_handling(self): 'WARNING', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_archive_empty_history_file(self): """ https://github.com/postgrespro/pg_probackup/issues/326 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2789,7 +2662,16 @@ def test_archive_empty_history_file(self): 'WARNING: History file is corrupted or missing: "{0}"'.format(os.path.join(wal_dir, '00000004.history')), log_content) - self.del_test_dir(module_name, fname) + +def cleanup_ptrack(log_content): + # PBCKP-423 - need to clean ptrack warning + ptrack_is_not = 'Ptrack 1.X is not supported anymore' + if ptrack_is_not in log_content: + lines = [line for line in log_content.splitlines() + if ptrack_is_not not in line] + log_content = "".join(lines) + return log_content + # TODO test with multiple not archived segments. # TODO corrupted file in archive. diff --git a/tests/auth_test.py b/tests/auth_test.py index 78af21be9..32cabc4a1 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -30,18 +30,23 @@ def test_backup_via_unprivileged_user(self): run a backups without EXECUTE rights on certain functions """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, + ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() + if self.ptrack: + node.safe_psql( + "postgres", + "CREATE EXTENSION ptrack") + node.safe_psql("postgres", "CREATE ROLE backup with LOGIN") try: @@ -51,16 +56,29 @@ def test_backup_via_unprivileged_user(self): 1, 0, "Expecting Error due to missing grant on EXECUTE.") except ProbackupException as e: - self.assertIn( - "ERROR: query failed: ERROR: permission denied " - "for function pg_start_backup", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_start_backup", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: query failed: ERROR: permission denied " + "for function pg_backup_start", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION" - " pg_start_backup(text, boolean, boolean) TO backup;") + if self.get_version(node) < 150000: + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION" + " pg_start_backup(text, boolean, boolean) TO backup;") + else: + node.safe_psql( + "postgres", + "GRANT EXECUTE 
ON FUNCTION" + " pg_backup_start(text, boolean) TO backup;") if self.get_version(node) < 100000: node.safe_psql( @@ -97,25 +115,32 @@ def test_backup_via_unprivileged_user(self): 1, 0, "Expecting Error due to missing grant on EXECUTE.") except ProbackupException as e: - self.assertIn( - "ERROR: query failed: ERROR: permission denied " - "for function pg_stop_backup", e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: Query failed: ERROR: permission denied " + "for function pg_stop_backup", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: Query failed: ERROR: permission denied " + "for function pg_backup_stop", e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) if self.get_version(node) < self.version_to_num('10.0'): node.safe_psql( "postgres", "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup") - else: + elif self.get_version(node) < self.version_to_num('15.0'): node.safe_psql( "postgres", - "GRANT EXECUTE ON FUNCTION " - "pg_stop_backup(boolean, boolean) TO backup") - # Do this for ptrack backups + "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean, boolean) TO backup;") + else: node.safe_psql( "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup") + "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup;") self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -128,8 +153,6 @@ def test_backup_via_unprivileged_user(self): node.safe_psql( "test1", "create table t1 as select generate_series(0,100)") - if self.ptrack: - self.set_auto_conf(node, {'ptrack_enable': 'on'}) node.stop() node.slow_start() @@ -142,18 +165,17 @@ def test_backup_via_unprivileged_user(self): backup_dir, 'node', node, options=['-U', 'backup']) # PTRACK -# self.backup_node( -# backup_dir, 'node', node, -# backup_type='ptrack', options=['-U', 'backup']) - - # Clean after yourself - self.del_test_dir(module_name, fname) + if self.ptrack: + self.backup_node( + backup_dir, 'node', node, + backup_type='ptrack', options=['-U', 'backup']) class AuthTest(unittest.TestCase): pb = None node = None + # TODO move to object scope, replace module_name @classmethod def setUpClass(cls): @@ -167,7 +189,10 @@ def setUpClass(cls): set_replication=True, initdb_params=['--data-checksums', '--auth-host=md5'] ) - modify_pg_hba(cls.node) + + cls.username = cls.pb.get_username() + + cls.modify_pg_hba(cls.node) cls.pb.init_pb(cls.backup_dir) cls.pb.add_instance(cls.backup_dir, cls.node.name, cls.node) @@ -177,22 +202,54 @@ def setUpClass(cls): except StartNodeException: raise unittest.skip("Node hasn't started") - cls.node.safe_psql( - "postgres", - "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION 
txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") + if cls.pb.get_version(cls.node) < 100000: + cls.node.safe_psql( + "postgres", + "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") + elif cls.pb.get_version(cls.node) < 150000: + cls.node.safe_psql( + "postgres", + "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") + else: + cls.node.safe_psql( + "postgres", + "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") + cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass') + # TODO move to object scope, replace module_name @classmethod def tearDownClass(cls): cls.node.cleanup() @@ -200,12 +257,13 @@ def tearDownClass(cls): @unittest.skipIf(skip_test, "Module pexpect isn't installed. 
You need to install it.") def setUp(self): - self.cmd = ['backup', + self.pb_cmd = ['backup', '-B', self.backup_dir, '--instance', self.node.name, '-h', '127.0.0.1', '-p', str(self.node.port), '-U', 'backup', + '-d', 'postgres', '-b', 'FULL' ] @@ -225,44 +283,31 @@ def test_empty_password(self): """ Test case: PGPB_AUTH03 - zero password length """ try: self.assertIn("ERROR: no password supplied", - str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, '\0\r\n')) - ) + self.run_pb_with_auth('\0\r\n')) except (TIMEOUT, ExceptionPexpect) as e: self.fail(e.value) def test_wrong_password(self): """ Test case: PGPB_AUTH04 - incorrect password """ - try: - self.assertIn("password authentication failed", - str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, 'wrong_password\r\n')) - ) - except (TIMEOUT, ExceptionPexpect) as e: - self.fail(e.value) + self.assertIn("password authentication failed", + self.run_pb_with_auth('wrong_password\r\n')) def test_right_password(self): """ Test case: PGPB_AUTH01 - correct password """ - try: - self.assertIn("completed", - str(run_pb_with_auth([self.pb.probackup_path] + self.cmd, 'password\r\n')) - ) - except (TIMEOUT, ExceptionPexpect) as e: - self.fail(e.value) + self.assertIn("completed", + self.run_pb_with_auth('password\r\n')) def test_right_password_and_wrong_pgpass(self): """ Test case: PGPB_AUTH05 - correct password and incorrect .pgpass (-W)""" line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password']) - create_pgpass(self.pgpass_file, line) - try: - self.assertIn("completed", - str(run_pb_with_auth([self.pb.probackup_path] + self.cmd + ['-W'], 'password\r\n')) - ) - except (TIMEOUT, ExceptionPexpect) as e: - self.fail(e.value) + self.create_pgpass(self.pgpass_file, line) + self.assertIn("completed", + self.run_pb_with_auth('password\r\n', add_args=["-W"])) def test_ctrl_c_event(self): """ Test case: PGPB_AUTH02 - send interrupt signal """ try: - run_pb_with_auth([self.pb.probackup_path] + self.cmd, kill=True) + self.run_pb_with_auth(kill=True) except TIMEOUT: self.fail("Error: CTRL+C event ignored") @@ -270,91 +315,74 @@ def test_pgpassfile_env(self): """ Test case: PGPB_AUTH06 - set environment var PGPASSFILE """ path = os.path.join(self.pb.tmp_path, module_name, 'pgpass.conf') line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password']) - create_pgpass(path, line) + self.create_pgpass(path, line) self.pb.test_env["PGPASSFILE"] = path - try: - self.assertEqual( - "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"], - "ERROR: Full backup status is not valid." - ) - except ProbackupException as e: - self.fail(e) + self.assertEqual( + "OK", + self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + "ERROR: Full backup status is not valid." + ) def test_pgpass(self): """ Test case: PGPB_AUTH07 - Create file .pgpass in home dir. """ line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password']) - create_pgpass(self.pgpass_file, line) - try: - self.assertEqual( - "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"], - "ERROR: Full backup status is not valid." - ) - except ProbackupException as e: - self.fail(e) + self.create_pgpass(self.pgpass_file, line) + self.assertEqual( + "OK", + self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + "ERROR: Full backup status is not valid." 
+ ) def test_pgpassword(self): """ Test case: PGPB_AUTH08 - set environment var PGPASSWORD """ self.pb.test_env["PGPASSWORD"] = "password" - try: - self.assertEqual( - "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"], - "ERROR: Full backup status is not valid." - ) - except ProbackupException as e: - self.fail(e) + self.assertEqual( + "OK", + self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + "ERROR: Full backup status is not valid." + ) def test_pgpassword_and_wrong_pgpass(self): """ Test case: PGPB_AUTH09 - Check priority between PGPASSWORD and .pgpass file""" line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'wrong_password']) - create_pgpass(self.pgpass_file, line) + self.create_pgpass(self.pgpass_file, line) self.pb.test_env["PGPASSWORD"] = "password" - try: - self.assertEqual( - "OK", - self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.cmd + ['-w']))["status"], - "ERROR: Full backup status is not valid." - ) - except ProbackupException as e: - self.fail(e) - + self.assertEqual( + "OK", + self.pb.show_pb(self.backup_dir, self.node.name, self.pb.run_pb(self.pb_cmd + ['-w']))["status"], + "ERROR: Full backup status is not valid." + ) -def run_pb_with_auth(cmd, password=None, kill=False): - try: - with spawn(" ".join(cmd), encoding='utf-8', timeout=10) as probackup: + def run_pb_with_auth(self, password=None, add_args = [], kill=False): + with spawn(self.pb.probackup_path, self.pb_cmd + add_args, encoding='utf-8', timeout=10) as probackup: result = probackup.expect(u"Password for user .*:", 5) if kill: probackup.kill(signal.SIGINT) elif result == 0: probackup.sendline(password) probackup.expect(EOF) - return probackup.before + return str(probackup.before) else: raise ExceptionPexpect("Other pexpect errors.") - except TIMEOUT: - raise TIMEOUT("Timeout error.") - except ExceptionPexpect: - raise ExceptionPexpect("Pexpect error.") - - -def modify_pg_hba(node): - """ - Description: - Add trust authentication for user postgres. Need for add new role and set grant. - :param node: - :return None: - """ - hba_conf = os.path.join(node.data_dir, "pg_hba.conf") - with open(hba_conf, 'r+') as fio: - data = fio.read() - fio.seek(0) - fio.write('host\tall\tpostgres\t127.0.0.1/0\ttrust\n' + data) - - -def create_pgpass(path, line): - with open(path, 'w') as passfile: - # host:port:db:username:password - passfile.write(line) - os.chmod(path, 0o600) + + + @classmethod + def modify_pg_hba(cls, node): + """ + Description: + Add trust authentication for user postgres. Need for add new role and set grant. 
+ :param node: + :return None: + """ + hba_conf = os.path.join(node.data_dir, "pg_hba.conf") + with open(hba_conf, 'r+') as fio: + data = fio.read() + fio.seek(0) + fio.write('host\tall\t%s\t127.0.0.1/0\ttrust\n%s' % (cls.username, data)) + + + def create_pgpass(self, path, line): + with open(path, 'w') as passfile: + # host:port:db:username:password + passfile.write(line) + os.chmod(path, 0o600) diff --git a/tests/backup.py b/tests/backup_test.py similarity index 77% rename from tests/backup.py rename to tests/backup_test.py index b14f5fe98..dc60228b5 100644 --- a/tests/backup.py +++ b/tests/backup_test.py @@ -1,29 +1,82 @@ import unittest import os -from time import sleep -from .helpers.ptrack_helpers import ProbackupTest, ProbackupException +import re +from time import sleep, time +from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException import shutil from distutils.dir_util import copy_tree from testgres import ProcessType, QueryException import subprocess -module_name = 'backup' +class BackupTest(ProbackupTest, unittest.TestCase): + def test_full_backup(self): + """ + Just test full backup with at least two segments + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) -class BackupTest(ProbackupTest, unittest.TestCase): + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. + node.pgbench_init(scale=100, no_vacuum=True) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node) + + out = self.validate_pb(backup_dir, 'node', backup_id) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + out) + + def test_full_backup_stream(self): + """ + Just test full backup with at least two segments in stream mode + """ + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + # we need to write a lot. Lets speedup a bit. + pg_options={"fsync": "off", "synchronous_commit": "off"}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # Fill with data + # Have to use scale=100 to create second segment. 
+ node.pgbench_init(scale=100, no_vacuum=True) + + # FULL + backup_id = self.backup_node(backup_dir, 'node', node, + options=["--stream"]) + + out = self.validate_pb(backup_dir, 'node', backup_id) + self.assertIn( + "INFO: Backup {0} is valid".format(backup_id), + out) # @unittest.skip("skip") # @unittest.expectedFailure # PGPRO-707 def test_backup_modes_archive(self): """standart backup modes with ARCHIVE WAL method""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -80,18 +133,14 @@ def test_backup_modes_archive(self): backup_dir, 'node', backup_id=show_backup_2['id'])["parent-backup-id"]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_smooth_checkpoint(self): """full backup with smooth checkpoint""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -103,18 +152,14 @@ def test_smooth_checkpoint(self): self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") node.stop() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incremental_backup_without_full(self): """page backup without validated full backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -139,18 +184,14 @@ def test_incremental_backup_without_full(self): self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incremental_backup_corrupt_full(self): """page-level backup with corrupted full backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -200,19 +241,15 @@ def test_incremental_backup_corrupt_full(self): self.assertEqual( self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def 
test_delta_threads_stream(self): """delta multi thread backup mode and stream""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -227,21 +264,17 @@ def test_delta_threads_stream(self): backup_type="delta", options=["-j", "4", "--stream"]) self.assertEqual(self.show_pb(backup_dir, 'node')[1]['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_detect_corruption(self): """make node, corrupt some page, check that backup failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -292,28 +325,23 @@ def test_page_detect_corruption(self): 'ERROR', "Backup Status should be ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") def test_backup_detect_corruption(self): """make node, corrupt some page, check that backup failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -439,27 +467,23 @@ def test_backup_detect_corruption(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_detect_invalid_block_header(self): """make node, corrupt some page, check that backup failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -580,27 +604,23 @@ def test_backup_detect_invalid_block_header(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( 
repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_detect_missing_permissions(self): """make node, corrupt some page, check that backup failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() - if self.ptrack and node.major_version > 11: + if self.ptrack: node.safe_psql( "postgres", "create extension ptrack") @@ -721,22 +741,18 @@ def test_backup_detect_missing_permissions(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_truncate_misaligned(self): """ make node, truncate file to size not even to BLCKSIZE, take backup """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -772,19 +788,15 @@ def test_backup_truncate_misaligned(self): self.assertIn("WARNING: File", output) self.assertIn("invalid file size", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_tablespace_in_pgdata_pgpro_1376(self): """PGPRO-1376 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -866,9 +878,6 @@ def test_tablespace_in_pgdata_pgpro_1376(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_tablespace_handling(self): """ @@ -877,13 +886,12 @@ def test_basic_tablespace_handling(self): check that restore with tablespace mapping will end with success """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -925,7 +933,7 @@ def test_basic_tablespace_handling(self): tblspace2_new_path = self.get_tblspace_path(node, 'tblspace2_new') node_restored = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -979,22 +987,18 @@ def test_basic_tablespace_handling(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_tablespace_handling_1(self): """ make node with tablespace A, take full backup, check that restore with tablespace mapping of tablespace B will end with error """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1014,7 +1018,7 @@ def test_tablespace_handling_1(self): options=["-j", "4", "--stream"]) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -1037,22 +1041,18 @@ def test_tablespace_handling_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_tablespace_handling_2(self): """ make node without tablespaces, take full backup, check that restore with tablespace mapping will end with error """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1066,7 +1066,7 @@ def test_tablespace_handling_2(self): options=["-j", "4", "--stream"]) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -1089,16 +1089,14 @@ def test_tablespace_handling_2(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_drop_rel_during_full_backup(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1170,16 +1168,12 @@ def test_drop_rel_during_full_backup(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_drop_db_during_full_backup(self): 
"""""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1238,16 +1232,14 @@ def test_drop_db_during_full_backup(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_drop_rel_during_backup_delta(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1307,16 +1299,14 @@ def test_drop_rel_during_backup_delta(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_drop_rel_during_backup_page(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1373,16 +1363,12 @@ def test_drop_rel_during_backup_page(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_persistent_slot_for_stream_backup(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1407,22 +1393,18 @@ def test_persistent_slot_for_stream_backup(self): backup_dir, 'node', node, options=['--stream', '--slot=slot_1']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_temp_slot_for_stream_backup(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'max_wal_size': '40MB'}) if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') + self.skipTest('You need PostgreSQL >= 10 for this test') self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) @@ -1439,16 +1421,14 @@ def test_basic_temp_slot_for_stream_backup(self): backup_dir, 'node', node, options=['--stream', '--slot=slot_1', '--temp-slot']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_concurrent_drop_table(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1485,19 +1465,15 @@ def test_backup_concurrent_drop_table(self): self.assertEqual(show_backup['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pg_11_adjusted_wal_segment_size(self): """""" if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=[ '--data-checksums', @@ -1573,16 +1549,14 @@ def test_pg_11_adjusted_wal_segment_size(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_sigint_handling(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1612,16 +1586,14 @@ def test_sigint_handling(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_sigterm_handling(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1650,16 +1622,14 @@ def test_sigterm_handling(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_sigquit_handling(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1687,16 +1657,12 @@ def test_sigquit_handling(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_drop_table(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1722,19 +1688,15 @@ def test_drop_table(self): self.backup_node( backup_dir, 'node', node, options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_missing_file_permissions(self): """""" if os.name == 'nt': - return unittest.skip('Skipped because it is POSIX only test') + self.skipTest('Skipped because it is POSIX only test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1769,19 +1731,15 @@ def test_basic_missing_file_permissions(self): os.chmod(full_path, 700) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_missing_dir_permissions(self): """""" if os.name == 'nt': - return unittest.skip('Skipped because it is POSIX only test') + self.skipTest('Skipped because it is POSIX only test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1812,16 +1770,12 @@ def test_basic_missing_dir_permissions(self): os.rmdir(full_path) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_with_least_privileges_role(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], @@ -1875,8 +1829,7 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -1916,8 +1869,8 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -1954,6 +1907,44 @@ def test_backup_with_least_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: node.safe_psql( @@ -1968,11 +1959,8 @@ def test_backup_with_least_privileges_role(self): if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") - - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION 
pg_catalog.pgpro_edition() TO backup;") # FULL backup self.backup_node( @@ -2007,9 +1995,6 @@ def test_backup_with_least_privileges_role(self): backup_dir, 'node', node, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_choosing(self): """ @@ -2018,10 +2003,9 @@ def test_parent_choosing(self): PAGE1 <- CORRUPT FULL """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2071,9 +2055,6 @@ def test_parent_choosing(self): backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], full_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_choosing_1(self): """ @@ -2082,10 +2063,9 @@ def test_parent_choosing_1(self): PAGE1 <- (missing) FULL """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2131,9 +2111,6 @@ def test_parent_choosing_1(self): backup_dir, 'node', backup_id=page3_id)['parent-backup-id'], full_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_choosing_2(self): """ @@ -2142,10 +2119,9 @@ def test_parent_choosing_2(self): PAGE1 <- OK FULL <- (missing) """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2191,19 +2167,15 @@ def test_parent_choosing_2(self): backup_dir, 'node')[2]['status'], 'ERROR') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_with_less_privileges_role(self): """ check permissions correctness from documentation: https://github.com/postgrespro/pg_probackup/blob/master/Documentation.md#configuring-the-database-cluster """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], @@ -2232,7 +2204,6 @@ def test_backup_with_less_privileges_role(self): if self.get_version(node) < 90600: node.safe_psql( 'backupdb', - "BEGIN; " "CREATE ROLE backup WITH LOGIN; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " @@ -2243,14 +2214,11 @@ def test_backup_with_less_privileges_role(self): "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( 'backupdb', - "BEGIN; " "CREATE ROLE backup WITH LOGIN; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " @@ -2265,11 +2233,10 @@ def test_backup_with_less_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " "COMMIT;" ) - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', - "BEGIN; " "CREATE ROLE backup WITH LOGIN; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " @@ -2284,6 +2251,25 @@ def test_backup_with_less_privileges_role(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " "COMMIT;" ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "BEGIN; " + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) # enable STREAM backup node.safe_psql( @@ -2324,12 +2310,11 @@ def test_backup_with_less_privileges_role(self): datname='backupdb', options=['--stream', '-U', 'backup']) if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) return # Restore as replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -2351,63 +2336,57 @@ def test_backup_with_less_privileges_role(self): replica.slow_start(replica=True) - # Archive backups from replica in this test are disabled, - # because WAL archiving on replica in idle DB in PostgreSQL is broken: - # replica will not archive the previous WAL until it receives new records in the next WAL file, - # this "lazy" archiving can be seen in src/backend/replication/walreceiver.c:XLogWalRcvWrite() - # (see !XLByteInSeg checking and XLogArchiveNotify() calling). 
- # # self.switch_wal_segment(node) - #self.backup_node( - # backup_dir, 'replica', replica, - # datname='backupdb', options=['-U', 'backup']) + # self.switch_wal_segment(node) + + self.backup_node( + backup_dir, 'replica', replica, + datname='backupdb', options=['-U', 'backup']) # stream full backup from replica self.backup_node( backup_dir, 'replica', replica, datname='backupdb', options=['--stream', '-U', 'backup']) +# self.switch_wal_segment(node) + # PAGE backup from replica - #self.switch_wal_segment(node) - #self.backup_node( - # backup_dir, 'replica', replica, backup_type='page', - # datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='page', + datname='backupdb', options=['-U', 'backup', '--archive-timeout=30s']) self.backup_node( backup_dir, 'replica', replica, backup_type='page', datname='backupdb', options=['--stream', '-U', 'backup']) # DELTA backup from replica - #self.switch_wal_segment(node) - #self.backup_node( - # backup_dir, 'replica', replica, backup_type='delta', - # datname='backupdb', options=['-U', 'backup']) + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='delta', + datname='backupdb', options=['-U', 'backup']) self.backup_node( backup_dir, 'replica', replica, backup_type='delta', datname='backupdb', options=['--stream', '-U', 'backup']) # PTRACK backup from replica if self.ptrack: - #self.switch_wal_segment(node) - #self.backup_node( - # backup_dir, 'replica', replica, backup_type='ptrack', - # datname='backupdb', options=['-U', 'backup']) + self.switch_wal_segment(node) + self.backup_node( + backup_dir, 'replica', replica, backup_type='ptrack', + datname='backupdb', options=['-U', 'backup']) self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_issue_132(self): """ https://github.com/postgrespro/pg_probackup/issues/132 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2434,18 +2413,14 @@ def test_issue_132(self): exit(1) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_issue_132_1(self): """ https://github.com/postgrespro/pg_probackup/issues/132 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2594,17 +2569,13 @@ def test_issue_132_1(self): 'INFO: Restore of backup {0} completed.'.format(delta_id), output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_note_sanity(self): """ test that adding note to backup works as expected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = 
os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2632,18 +2603,14 @@ def test_note_sanity(self): 'note', backup_meta) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_backup_made_by_newer_version(self): """incremental backup with parent made by newer version""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2685,20 +2652,16 @@ def test_parent_backup_made_by_newer_version(self): self.assertEqual( self.show_pb(backup_dir, 'node')[1]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_issue_289(self): """ https://github.com/postgrespro/pg_probackup/issues/289 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2730,20 +2693,16 @@ def test_issue_289(self): self.assertEqual( self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_issue_290(self): """ https://github.com/postgrespro/pg_probackup/issues/290 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2779,18 +2738,14 @@ def test_issue_290(self): self.assertEqual( self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_issue_203(self): """ https://github.com/postgrespro/pg_probackup/issues/203 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2810,7 +2765,7 @@ def test_issue_203(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 
'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', @@ -2819,49 +2774,50 @@ def test_issue_203(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_issue_231(self): """ https://github.com/postgrespro/pg_probackup/issues/231 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), - set_replication=True, - initdb_params=['--data-checksums']) + base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) - node.slow_start() datadir = os.path.join(node.data_dir, '123') - try: - self.backup_node( - backup_dir, 'node', node, data_dir='{0}'.format(datadir)) - except: - pass - - out = self.backup_node(backup_dir, 'node', node, options=['--stream'], return_id=False) - - # it is a bit racy - self.assertIn("WARNING: Cannot create directory", out) - - # Clean after yourself - self.del_test_dir(module_name, fname) + t0 = time() + while True: + with self.assertRaises(ProbackupException) as ctx: + self.backup_node(backup_dir, 'node', node) + pb1 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0] + + t = time() + if int(pb1, 36) == int(t) and t % 1 < 0.5: + # ok, we have a chance to start next backup in same second + break + elif t - t0 > 20: + # Oops, we are waiting for too long. Looks like this runner + # is too slow. Lets skip the test. + self.skipTest("runner is too slow") + # sleep to the second's end so backup will not sleep for a second. 
+ sleep(1 - t % 1) + + with self.assertRaises(ProbackupException) as ctx: + self.backup_node(backup_dir, 'node', node) + pb2 = re.search(r' backup ID: ([^\s,]+),', ctx.exception.message).groups()[0] + + self.assertNotEqual(pb1, pb2) def test_incr_backup_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2870,7 +2826,7 @@ def test_incr_backup_filenode_map(self): node.slow_start() node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), initdb_params=['--data-checksums']) node1.cleanup() @@ -2902,16 +2858,14 @@ def test_incr_backup_filenode_map(self): 'postgres', 'select 1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_missing_wal_segment(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums'], @@ -2972,21 +2926,17 @@ def test_missing_wal_segment(self): gdb.output) self.assertIn( - 'WARNING: backup in progress, stop backup', + 'WARNING: A backup is in progress, stopping it', gdb.output) - - # TODO: check the same for PAGE backup - # Clean after yourself - self.del_test_dir(module_name, fname) + # TODO: check the same for PAGE backup # @unittest.skip("skip") def test_missing_replication_permission(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3001,7 +2951,7 @@ def test_missing_replication_permission(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -3055,8 +3005,8 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "CREATE ROLE backup WITH LOGIN; " @@ -3078,13 +3028,36 @@ def test_missing_replication_permission(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") - + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") + sleep(2) replica.promote() @@ -3102,22 +3075,27 @@ def test_missing_replication_permission(self): except ProbackupException as e: # 9.5: ERROR: must be superuser or replication role to run a backup # >=9.6: FATAL: must be superuser or replication role to start walsender - self.assertRegex( - e.message, - "ERROR: must be superuser or replication role to run a backup|FATAL: must be superuser or replication role to start walsender", - "\n Unexpected Error Message: {0}\n CMD: {1}".format( - repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) + if self.pg_config_version < 160000: + self.assertRegex( + e.message, + "ERROR: must be superuser or replication role to run a backup|" + "FATAL: must be superuser or replication role to start walsender", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + else: + self.assertRegex( + e.message, + "FATAL: permission denied to start WAL sender\n" + "DETAIL: Only roles with the REPLICATION", + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) # @unittest.skip("skip") def test_missing_replication_permission_1(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3132,7 +3110,7 @@ def test_missing_replication_permission_1(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 
'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -3162,8 +3140,7 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -3188,8 +3165,8 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "CREATE ROLE backup WITH LOGIN; " @@ -3211,12 +3188,35 @@ def test_missing_replication_permission_1(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) + # > 15 + else: + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") replica.promote() @@ -3235,21 +3235,26 @@ def test_missing_replication_permission_1(self): # Messages for >=14 # 'WARNING: could not connect to database backupdb: connection to server on socket "/tmp/.s.PGSQL.30983" failed: FATAL: must be superuser or replication role to start walsender' # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (127.0.0.1), port 29732 failed: FATAL: must be superuser or replication role to start walsender' - self.assertRegex( - output, - r'WARNING: could not connect to database backupdb: (connection to server (on socket "/tmp/.s.PGSQL.\d+"|at 
"localhost" \(127.0.0.1\), port \d+) failed: ){0,1}' - 'FATAL: must be superuser or replication role to start walsender') + # OS-dependant messages: + # 'WARNING: could not connect to database backupdb: connection to server at "localhost" (::1), port 12101 failed: Connection refused\n\tIs the server running on that host and accepting TCP/IP connections?\nconnection to server at "localhost" (127.0.0.1), port 12101 failed: FATAL: must be superuser or replication role to start walsender' - # Clean after yourself - self.del_test_dir(module_name, fname) + if self.pg_config_version < 160000: + self.assertRegex( + output, + r'WARNING: could not connect to database backupdb:[\s\S]*?' + r'FATAL: must be superuser or replication role to start walsender') + else: + self.assertRegex( + output, + r'WARNING: could not connect to database backupdb:[\s\S]*?' + r'FATAL: permission denied to start WAL sender') # @unittest.skip("skip") def test_basic_backup_default_transaction_read_only(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'default_transaction_read_only': 'on'}) @@ -3288,16 +3293,14 @@ def test_basic_backup_default_transaction_read_only(self): # PAGE backup self.backup_node(backup_dir, 'node', node, backup_type='page') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_backup_atexit(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3330,27 +3333,28 @@ def test_backup_atexit(self): log_content = f.read() #print(log_content) self.assertIn( - 'WARNING: backup in progress, stop backup', + 'WARNING: A backup is in progress, stopping it.', log_content) - - self.assertIn( - 'FROM pg_catalog.pg_stop_backup', - log_content) - + + if self.get_version(node) < 150000: + self.assertIn( + 'FROM pg_catalog.pg_stop_backup', + log_content) + else: + self.assertIn( + 'FROM pg_catalog.pg_backup_stop', + log_content) + self.assertIn( 'setting its status to ERROR', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pg_stop_backup_missing_permissions(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3372,10 +3376,15 @@ def test_pg_stop_backup_missing_permissions(self): node.safe_psql( 'postgres', 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') - else: + elif self.get_version(node) < 150000: node.safe_psql( 
'postgres', 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') + else: + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) FROM backup') + # Full backup in streaming mode try: @@ -3383,22 +3392,267 @@ def test_pg_stop_backup_missing_permissions(self): backup_dir, 'node', node, options=['--stream', '-U', 'backup']) # we should die here because exception is what we expect to happen - self.assertEqual( - 1, 0, - "Expecting Error because of missing permissions on pg_stop_backup " - "\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) + if self.get_version(node) < 150000: + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions on pg_stop_backup " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + else: + self.assertEqual( + 1, 0, + "Expecting Error because of missing permissions on pg_backup_stop " + "\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) except ProbackupException as e: + if self.get_version(node) < 150000: + self.assertIn( + "ERROR: permission denied for function pg_stop_backup", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + else: + self.assertIn( + "ERROR: permission denied for function pg_backup_stop", + e.message, + "\n Unexpected Error Message: {0}\n CMD: {1}".format( + repr(e.message), self.cmd)) + self.assertIn( - "ERROR: permission denied for function pg_stop_backup", + "query was: SELECT pg_catalog.txid_snapshot_xmax", e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - self.assertIn( - "query was: SELECT pg_catalog.txid_snapshot_xmax", + + # @unittest.skip("skip") + def test_start_time(self): + """Test, that option --start-time allows to set backup_id and restore""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore FULL backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_full'), + backup_id=base36enc(startTime)) + + #FULL backup with incorrect start time + try: + startTime = str(int(time()-100000)) + self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + 'Expecting Error because start time for new backup must be newer ' + '\n Output: {0} \n CMD: {1}'.format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertRegex( e.message, + r"ERROR: Can't assign backup_id from requested start_time \(\w*\), this time must be later that backup \w*\n", "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) + # DELTA backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', 
node, backup_type='delta', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore DELTA backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_delta'), + backup_id=base36enc(startTime)) + + # PAGE backup + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='page', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PAGE backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_page'), + backup_id=base36enc(startTime)) + + # PTRACK backup + if self.ptrack: + node.safe_psql( + 'postgres', + 'create extension ptrack') + + startTime = int(time()) + self.backup_node( + backup_dir, 'node', node, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(str(startTime))]) + # restore PTRACK backup by backup_id calculated from start-time + self.restore_node( + backup_dir, 'node', + data_dir=os.path.join(self.tmp_path, self.module_name, self.fname, 'node_restored_ptrack'), + backup_id=base36enc(startTime)) + + # @unittest.skip("skip") + def test_start_time_few_nodes(self): + """Test, that we can synchronize backup_id's for different DBs""" + node1 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node1'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir1 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup1') + self.init_pb(backup_dir1) + self.add_instance(backup_dir1, 'node1', node1) + self.set_archiving(backup_dir1, 'node1', node1) + node1.slow_start() + + node2 = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node2'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums']) + + backup_dir2 = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup2') + self.init_pb(backup_dir2) + self.add_instance(backup_dir2, 'node2', node2) + self.set_archiving(backup_dir2, 'node2', node2) + node2.slow_start() + + # FULL backup + startTime = str(int(time())) + self.backup_node( + backup_dir1, 'node1', node1, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type='full', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[0] + show_backup2 = self.show_pb(backup_dir2, 'node2')[0] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # DELTA backup + startTime = str(int(time())) + self.backup_node( + backup_dir1, 'node1', node1, backup_type='delta', + options=['--stream', '--start-time={0}'.format(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type='delta', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[1] + show_backup2 = self.show_pb(backup_dir2, 'node2')[1] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # PAGE backup + startTime = str(int(time())) + self.backup_node( + backup_dir1, 'node1', node1, backup_type='page', + options=['--stream', '--start-time={0}'.format(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type='page', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 
'node1')[2] + show_backup2 = self.show_pb(backup_dir2, 'node2')[2] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + # PTRACK backup + if self.ptrack: + node1.safe_psql( + 'postgres', + 'create extension ptrack') + node2.safe_psql( + 'postgres', + 'create extension ptrack') + + startTime = str(int(time())) + self.backup_node( + backup_dir1, 'node1', node1, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) + self.backup_node( + backup_dir2, 'node2', node2, backup_type='ptrack', + options=['--stream', '--start-time={0}'.format(startTime)]) + show_backup1 = self.show_pb(backup_dir1, 'node1')[3] + show_backup2 = self.show_pb(backup_dir2, 'node2')[3] + self.assertEqual(show_backup1['id'], show_backup2['id']) + + def test_regress_issue_585(self): + """https://github.com/postgrespro/pg_probackup/issues/585""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + node.slow_start() + + # create couple of files that looks like db files + with open(os.path.join(node.data_dir, 'pg_multixact/offsets/1000'),'wb') as f: + pass + with open(os.path.join(node.data_dir, 'pg_multixact/members/1000'),'wb') as f: + pass + + self.backup_node( + backup_dir, 'node', node, backup_type='full', + options=['--stream']) + + output = self.backup_node( + backup_dir, 'node', node, backup_type='delta', + options=['--stream'], + return_id=False, + ) + self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like') + + node.cleanup() + + output = self.restore_node(backup_dir, 'node', node) + self.assertNotRegex(output, r'WARNING: [^\n]* was stored as .* but looks like') + + def test_2_delta_backups(self): + """https://github.com/postgrespro/pg_probackup/issues/596""" + node = self.make_simple_node('node', + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + # self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # FULL + full_backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"]) + + # delta backup mode + delta_backup_id1 = self.backup_node( + backup_dir, 'node', node, backup_type="delta", options=["--stream"]) + + delta_backup_id2 = self.backup_node( + backup_dir, 'node', node, backup_type="delta", options=["--stream"]) + + # postgresql.conf and pg_hba.conf shouldn't be copied + conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id1, 'database', 'postgresql.conf') + self.assertFalse( + os.path.exists(conf_file), + "File should not exist: {0}".format(conf_file)) + conf_file = os.path.join(backup_dir, 'backups', 'node', delta_backup_id2, 'database', 'postgresql.conf') + print(conf_file) + self.assertFalse( + os.path.exists(conf_file), + "File should not exist: {0}".format(conf_file)) diff --git a/tests/catchup.py b/tests/catchup_test.py similarity index 79% rename from tests/catchup.py rename to tests/catchup_test.py index 8441deaaf..cf8388dd2 100644 --- a/tests/catchup.py +++ b/tests/catchup_test.py @@ -4,11 +4,7 @@ import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'catchup' - class CatchupTest(ProbackupTest, unittest.TestCase): - def setUp(self): - 
self.fname = self.id().split('.')[3] ######################################### # Basic tests @@ -19,17 +15,17 @@ def test_basic_full_catchup(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() src_pg.safe_psql( "postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do full catchup - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -51,13 +47,12 @@ def test_basic_full_catchup(self): dst_pg.slow_start() # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) def test_full_catchup_with_tablespace(self): """ @@ -65,7 +60,7 @@ def test_full_catchup_with_tablespace(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() @@ -74,10 +69,10 @@ def test_full_catchup_with_tablespace(self): src_pg.safe_psql( "postgres", "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do full catchup with tablespace mapping - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') self.catchup_node( backup_mode = 'FULL', @@ -110,12 +105,11 @@ def test_full_catchup_with_tablespace(self): dst_pg.slow_start() # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup dst_pg.stop() - self.del_test_dir(module_name, self.fname) def test_basic_delta_catchup(self): """ @@ -123,7 +117,7 @@ def test_basic_delta_catchup(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -133,7 +127,7 @@ def test_basic_delta_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -152,7 +146,7 @@ def test_basic_delta_catchup(self): pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) 
pgbench.wait() src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup self.catchup_node( @@ -177,24 +171,23 @@ def test_basic_delta_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) def test_basic_ptrack_catchup(self): """ Test ptrack catchup """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -206,7 +199,7 @@ def test_basic_ptrack_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -225,7 +218,7 @@ def test_basic_ptrack_catchup(self): pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do ptrack catchup self.catchup_node( @@ -250,13 +243,12 @@ def test_basic_ptrack_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) def test_tli_delta_catchup(self): """ @@ -264,14 +256,14 @@ def test_tli_delta_catchup(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -290,7 +282,7 @@ def test_tli_delta_catchup(self): src_pg.slow_start(replica = True) src_pg.promote() src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do catchup (src_tli = 2, dst_tli = 1) self.catchup_node( @@ -314,7 +306,7 @@ def 
test_tli_delta_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') dst_pg.stop() @@ -329,18 +321,17 @@ def test_tli_delta_catchup(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tli_ptrack_catchup(self): """ Test that we correctly follow timeline change with ptrack catchup """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -349,7 +340,7 @@ def test_tli_ptrack_catchup(self): src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -373,7 +364,7 @@ def test_tli_ptrack_catchup(self): self.assertEqual(src_tli, "2", "Postgres didn't update TLI after promote") src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do catchup (src_tli = 2, dst_tli = 1) self.catchup_node( @@ -397,7 +388,7 @@ def test_tli_ptrack_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') dst_pg.stop() @@ -412,7 +403,6 @@ def test_tli_ptrack_catchup(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) ######################################### # Test various corner conditions @@ -423,7 +413,7 @@ def test_table_drop_with_delta(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -433,7 +423,7 @@ def test_table_drop_with_delta(self): "CREATE TABLE ultimate_question AS SELECT 42 AS answer") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -468,18 +458,17 @@ def test_table_drop_with_delta(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_table_drop_with_ptrack(self): """ Test that dropped table in source will be dropped in ptrack catchup'ed instance too """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( - 
base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -491,7 +480,7 @@ def test_table_drop_with_ptrack(self): "CREATE TABLE ultimate_question AS SELECT 42 AS answer") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -526,7 +515,6 @@ def test_table_drop_with_ptrack(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tablefile_truncation_with_delta(self): """ @@ -534,7 +522,7 @@ def test_tablefile_truncation_with_delta(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -549,7 +537,7 @@ def test_tablefile_truncation_with_delta(self): src_pg.safe_psql("postgres", "VACUUM t_heap") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -583,18 +571,17 @@ def test_tablefile_truncation_with_delta(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tablefile_truncation_with_ptrack(self): """ Test that truncated table in source will be truncated in ptrack catchup'ed instance too """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, initdb_params = ['--data-checksums'] @@ -611,7 +598,7 @@ def test_tablefile_truncation_with_ptrack(self): src_pg.safe_psql("postgres", "VACUUM t_heap") # preparation 2: make clean shutdowned lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -645,7 +632,6 @@ def test_tablefile_truncation_with_ptrack(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) ######################################### # Test reaction on user errors @@ -655,9 +641,9 @@ def test_local_tablespace_without_mapping(self): Test that we detect absence of needed --tablespace-mapping option """ if self.remote: - return unittest.skip('Skipped because this test tests local catchup error handling') + self.skipTest('Skipped because this test tests local catchup error handling') - src_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'src')) + src_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'src')) src_pg.slow_start() tblspace_path = self.get_tblspace_path(src_pg, 'tblspace') @@ -669,7 +655,7 @@ def test_local_tablespace_without_mapping(self): "postgres", "CREATE TABLE ultimate_question TABLESPACE 
tblspace AS SELECT 42 AS answer") - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) try: self.catchup_node( backup_mode = 'FULL', @@ -691,7 +677,6 @@ def test_local_tablespace_without_mapping(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_running_dest_postmaster(self): """ @@ -699,14 +684,14 @@ def test_running_dest_postmaster(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -738,7 +723,6 @@ def test_running_dest_postmaster(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_same_db_id(self): """ @@ -747,12 +731,12 @@ def test_same_db_id(self): # preparation: # source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() # destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -765,9 +749,9 @@ def test_same_db_id(self): dst_pg.slow_start() dst_pg.stop() # fake destination - fake_dst_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'fake_dst')) + fake_dst_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'fake_dst')) # fake source - fake_src_pg = self.make_simple_node(base_dir = os.path.join(module_name, self.fname, 'fake_src')) + fake_src_pg = self.make_simple_node(base_dir = os.path.join(self.module_name, self.fname, 'fake_src')) # try delta catchup (src (with correct src conn), fake_dst) try: @@ -803,7 +787,6 @@ def test_same_db_id(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tli_destination_mismatch(self): """ @@ -811,14 +794,14 @@ def test_tli_destination_mismatch(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -835,7 +818,7 @@ def test_tli_destination_mismatch(self): # preparation 3: "useful" changes src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # try catchup try: @@ -849,7 +832,7 @@ def test_tli_destination_mismatch(self): dst_options['port'] = str(dst_pg.port) self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() - 
dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") dst_pg.stop() self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') except ProbackupException as e: @@ -860,7 +843,6 @@ def test_tli_destination_mismatch(self): # Cleanup src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_tli_source_mismatch(self): """ @@ -868,14 +850,14 @@ def test_tli_source_mismatch(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) src_pg.slow_start() # preparation 2: fake source (promouted copy) - fake_src_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'fake_src')) + fake_src_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'fake_src')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -899,7 +881,7 @@ def test_tli_source_mismatch(self): fake_src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 'trash' AS garbage") # preparation 3: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -914,7 +896,7 @@ def test_tli_source_mismatch(self): # preparation 4: "useful" changes src_pg.safe_psql("postgres", "CREATE TABLE ultimate_question AS SELECT 42 AS answer") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # try catchup try: @@ -928,7 +910,7 @@ def test_tli_source_mismatch(self): dst_options['port'] = str(dst_pg.port) self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") dst_pg.stop() self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') except ProbackupException as e: @@ -940,7 +922,6 @@ def test_tli_source_mismatch(self): # Cleanup src_pg.stop() fake_src_pg.stop() - self.del_test_dir(module_name, self.fname) ######################################### # Test unclean destination @@ -951,7 +932,7 @@ def test_unclean_delta_catchup(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -961,7 +942,7 @@ def test_unclean_delta_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -991,14 +972,14 @@ def test_unclean_delta_catchup(self): self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") - os.kill(dst_pg.pid, signal.SIGKILL) + dst_pg.kill() # preparation 3: make changes on master (source) src_pg.pgbench_init(scale = 10) pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) 
pgbench.wait() src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup self.catchup_node( @@ -1023,23 +1004,22 @@ def test_unclean_delta_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup dst_pg.stop() - self.del_test_dir(module_name, self.fname) def test_unclean_ptrack_catchup(self): """ Test that we correctly recover uncleanly shutdowned destination """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, ptrack_enable = True, pg_options = { 'wal_log_hints': 'on' } @@ -1051,7 +1031,7 @@ def test_unclean_ptrack_catchup(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: destination - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1081,14 +1061,14 @@ def test_unclean_ptrack_catchup(self): self.set_auto_conf(dst_pg, dst_options) dst_pg.slow_start() self.assertNotEqual(dst_pg.pid, 0, "Cannot detect pid of running postgres") - os.kill(dst_pg.pid, signal.SIGKILL) + dst_pg.kill() # preparation 3: make changes on master (source) src_pg.pgbench_init(scale = 10) pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) pgbench.wait() src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") # do delta catchup self.catchup_node( @@ -1113,12 +1093,11 @@ def test_unclean_ptrack_catchup(self): dst_pg.slow_start(replica = True) # 2nd check: run verification query - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup dst_pg.stop() - self.del_test_dir(module_name, self.fname) ######################################### # Test replication slot logic @@ -1139,13 +1118,13 @@ def test_catchup_with_replication_slot(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() # 1a. --slot option - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_1a')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_1a')) try: self.catchup_node( backup_mode = 'FULL', @@ -1165,7 +1144,7 @@ def test_catchup_with_replication_slot(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) # 1b. 
--slot option - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_1b')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_1b')) src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_1b')") self.catchup_node( backup_mode = 'FULL', @@ -1178,7 +1157,7 @@ def test_catchup_with_replication_slot(self): ) # 2a. --slot --perm-slot - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_2a')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_2a')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1191,7 +1170,7 @@ def test_catchup_with_replication_slot(self): ) # 2b. and 4. --slot --perm-slot - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_2b')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_2b')) src_pg.safe_psql("postgres", "SELECT pg_catalog.pg_create_physical_replication_slot('existentslot_2b')") try: self.catchup_node( @@ -1213,7 +1192,7 @@ def test_catchup_with_replication_slot(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) # 3. --perm-slot --slot - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_3')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_3')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1233,7 +1212,7 @@ def test_catchup_with_replication_slot(self): # 5. --perm-slot --temp-slot (PG>=10) if self.get_version(src_pg) >= self.version_to_num('10.0'): - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst_5')) try: self.catchup_node( backup_mode = 'FULL', @@ -1254,7 +1233,6 @@ def test_catchup_with_replication_slot(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) ######################################### # --exclude-path @@ -1265,7 +1243,7 @@ def test_catchup_with_exclude_path(self): """ # preparation src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True ) src_pg.slow_start() @@ -1282,7 +1260,7 @@ def test_catchup_with_exclude_path(self): f.flush() f.close - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1333,7 +1311,6 @@ def test_catchup_with_exclude_path(self): #self.assertEqual(1, 0, 'Stop test') src_pg.stop() - self.del_test_dir(module_name, self.fname) def test_config_exclusion(self): """ @@ -1341,7 +1318,7 @@ def test_config_exclusion(self): """ # preparation 1: source src_pg = self.make_simple_node( - base_dir = os.path.join(module_name, self.fname, 'src'), + base_dir = os.path.join(self.module_name, self.fname, 'src'), set_replication = True, pg_options = { 'wal_log_hints': 'on' } ) @@ -1351,7 +1328,7 @@ def test_config_exclusion(self): "CREATE TABLE ultimate_question(answer int)") # preparation 2: make lagging behind replica - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst')) + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) 
self.catchup_node( backup_mode = 'FULL', source_pgdata = src_pg.data_dir, @@ -1362,6 +1339,7 @@ def test_config_exclusion(self): dst_options = {} dst_options['port'] = str(dst_pg.port) self.set_auto_conf(dst_pg, dst_options) + dst_pg._assign_master(src_pg) dst_pg.slow_start(replica = True) dst_pg.stop() @@ -1389,8 +1367,9 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") + dst_pg.catchup() # wait for replication + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # preparation 4: make changes on master (source) @@ -1418,8 +1397,9 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(2*42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") + dst_pg.catchup() # wait for replication + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # preparation 5: make changes on master (source) @@ -1446,12 +1426,201 @@ def test_config_exclusion(self): # check: run verification query src_pg.safe_psql("postgres", "INSERT INTO ultimate_question VALUES(3*42)") - src_query_result = src_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") - dst_query_result = dst_pg.safe_psql("postgres", "SELECT * FROM ultimate_question") + src_query_result = src_pg.table_checksum("ultimate_question") + dst_pg.catchup() # wait for replication + dst_query_result = dst_pg.table_checksum("ultimate_question") self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') # Cleanup src_pg.stop() dst_pg.stop() #self.assertEqual(1, 0, 'Stop test') - self.del_test_dir(module_name, self.fname) + +######################################### +# --dry-run +######################################### + def test_dry_run_catchup_full(self): + """ + Test dry-run option for full catchup + """ + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(self.module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do full catchup + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after catchup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + + def test_dry_run_catchup_ptrack(self): + """ + Test dry-run option for catchup in incremental ptrack mode + """ + if not self.ptrack: + self.skipTest('Skipped 
because ptrack support is disabled') + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(self.module_name, self.fname, 'src'), + set_replication = True, + ptrack_enable = True, + initdb_params = ['--data-checksums'] + ) + src_pg.slow_start() + src_pg.safe_psql("postgres", "CREATE EXTENSION ptrack") + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do incremental catchup + self.catchup_node( + backup_mode = 'PTRACK', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--dry-run'] + ) + + # compare data dirs before and after catchup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + + def test_dry_run_catchup_delta(self): + """ + Test dry-run option for catchup in incremental delta mode + """ + + # preparation 1: source + src_pg = self.make_simple_node( + base_dir = os.path.join(self.module_name, self.fname, 'src'), + set_replication = True, + initdb_params = ['--data-checksums'], + pg_options = { 'wal_log_hints': 'on' } + ) + src_pg.slow_start() + + src_pg.pgbench_init(scale = 10) + pgbench = src_pg.pgbench(options=['-T', '10', '--no-vacuum']) + pgbench.wait() + + # preparation 2: make clean shutdowned lagging behind replica + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + self.set_replica(src_pg, dst_pg) + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start(replica = True) + dst_pg.stop() + + # save the condition before dry-run + content_before = self.pgdata_content(dst_pg.data_dir) + + # do delta catchup + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = ['-d', 'postgres', '-p', str(src_pg.port), '--stream', "--dry-run"] + ) + + # compare data dirs before and after catchup + self.compare_pgdata( + content_before, + self.pgdata_content(dst_pg.data_dir) + ) + + # Cleanup + src_pg.stop() + + def test_pgdata_is_ignored(self): + """ In catchup we still allow PGDATA to be set either from command line + or from the env var. 
This tests that PGDATA is actually ignored and + --source-pgdata is used instead + """ + node = self.make_simple_node('node', + set_replication = True + ) + node.slow_start() + + # do full catchup + dest = self.make_empty_node('dst') + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = node.data_dir, + destination_node = dest, + options = ['-d', 'postgres', '-p', str(node.port), '--stream', '--pgdata=xxx'] + ) + + self.compare_pgdata( + self.pgdata_content(node.data_dir), + self.pgdata_content(dest.data_dir) + ) + + os.environ['PGDATA']='xxx' + + dest2 = self.make_empty_node('dst') + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = node.data_dir, + destination_node = dest2, + options = ['-d', 'postgres', '-p', str(node.port), '--stream'] + ) + + self.compare_pgdata( + self.pgdata_content(node.data_dir), + self.pgdata_content(dest2.data_dir) + ) diff --git a/tests/cfs_backup.py b/tests/cfs_backup_test.py similarity index 92% rename from tests/cfs_backup.py rename to tests/cfs_backup_test.py index d820360fe..fb4a6c6b8 100644 --- a/tests/cfs_backup.py +++ b/tests/cfs_backup_test.py @@ -6,7 +6,6 @@ from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'cfs_backup' tblspace_name = 'cfs_tblspace' @@ -14,11 +13,10 @@ class CfsBackupNoEncTest(ProbackupTest, unittest.TestCase): # --- Begin --- # @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def setUp(self): - self.fname = self.id().split('.')[3] self.backup_dir = os.path.join( - self.tmp_path, module_name, self.fname, 'backup') + self.tmp_path, self.module_name, self.fname, 'backup') self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, self.fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -171,12 +169,18 @@ def test_fullbackup_after_create_table(self): "ERROR: File pg_compression not found in {0}".format( os.path.join(self.backup_dir, 'node', backup_id)) ) - self.assertTrue( - find_by_extensions( - [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], - ['.cfm']), - "ERROR: .cfm files not found in backup dir" - ) + + # check cfm size + cfms = find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) # @unittest.expectedFailure # @unittest.skip("skip") @@ -413,6 +417,55 @@ def test_fullbackup_empty_tablespace_page_after_create_table(self): "ERROR: .cfm files not found in backup dir" ) + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_page_doesnt_store_unchanged_cfm(self): + """ + Case: Test page backup doesn't store cfm file if table was not modified + """ + + self.node.safe_psql( + "postgres", + "CREATE TABLE {0} TABLESPACE {1} " + "AS SELECT i AS id, MD5(i::text) AS text, " + "MD5(repeat(i::text,10))::tsvector AS tsvector " + "FROM generate_series(0,256) i".format('t1', tblspace_name) + ) + + self.node.safe_psql("postgres", "checkpoint") + + backup_id_full = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='full') + + self.assertTrue( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id_full)], + ['.cfm']), + "ERROR: 
.cfm files not found in backup dir" + ) + + backup_id = self.backup_node( + self.backup_dir, 'node', self.node, backup_type='page') + + show_backup = self.show_pb(self.backup_dir, 'node', backup_id) + self.assertEqual( + "OK", + show_backup["status"], + "ERROR: Incremental backup status is not valid. \n " + "Current backup status={0}".format(show_backup["status"]) + ) + self.assertTrue( + find_by_name( + [self.get_tblspace_path(self.node, tblspace_name)], + ['pg_compression']), + "ERROR: File pg_compression not found" + ) + self.assertFalse( + find_by_extensions( + [os.path.join(self.backup_dir, 'backups', 'node', backup_id)], + ['.cfm']), + "ERROR: .cfm files is found in backup dir" + ) + # @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') @@ -694,7 +747,7 @@ def test_multiple_segments(self): 't_heap', tblspace_name) ) - full_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result = self.node.table_checksum("t_heap") try: backup_id_full = self.backup_node( @@ -716,7 +769,7 @@ def test_multiple_segments(self): 't_heap') ) - page_result = self.node.safe_psql("postgres", "SELECT * FROM t_heap") + page_result = self.node.table_checksum("t_heap") try: backup_id_page = self.backup_node( @@ -757,7 +810,7 @@ def test_multiple_segments(self): self.node.slow_start() self.assertEqual( full_result, - self.node.safe_psql("postgres", "SELECT * FROM t_heap"), + self.node.table_checksum("t_heap"), 'Lost data after restore') # CHECK PAGE BACKUP @@ -776,7 +829,7 @@ def test_multiple_segments(self): self.node.slow_start() self.assertEqual( page_result, - self.node.safe_psql("postgres", "SELECT * FROM t_heap"), + self.node.table_checksum("t_heap"), 'Lost data after restore') # @unittest.expectedFailure @@ -810,10 +863,8 @@ def test_multiple_segments_in_multiple_tablespaces(self): "FROM generate_series(0,1005000) i".format( 't_heap_2', tblspace_name_2)) - full_result_1 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_1") - full_result_2 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_2") + full_result_1 = self.node.table_checksum("t_heap_1") + full_result_2 = self.node.table_checksum("t_heap_2") try: backup_id_full = self.backup_node( @@ -844,10 +895,8 @@ def test_multiple_segments_in_multiple_tablespaces(self): 't_heap_2') ) - page_result_1 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_1") - page_result_2 = self.node.safe_psql( - "postgres", "SELECT * FROM t_heap_2") + page_result_1 = self.node.table_checksum("t_heap_1") + page_result_2 = self.node.table_checksum("t_heap_2") try: backup_id_page = self.backup_node( @@ -888,11 +937,11 @@ def test_multiple_segments_in_multiple_tablespaces(self): self.assertEqual( full_result_1, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), + self.node.table_checksum("t_heap_1"), 'Lost data after restore') self.assertEqual( full_result_2, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), + self.node.table_checksum("t_heap_2"), 'Lost data after restore') # CHECK PAGE BACKUP @@ -909,11 +958,11 @@ def test_multiple_segments_in_multiple_tablespaces(self): self.assertEqual( page_result_1, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_1"), + self.node.table_checksum("t_heap_1"), 'Lost data after restore') self.assertEqual( page_result_2, - self.node.safe_psql("postgres", "SELECT * FROM t_heap_2"), + self.node.table_checksum("t_heap_2"), 'Lost data after restore') # @unittest.expectedFailure @@ -983,7 +1032,6 @@ def 
test_fullbackup_after_create_table_page_after_create_table_stream(self): ) # --- Make backup with not valid data(broken .cfm) --- # - @unittest.expectedFailure # @unittest.skip("skip") @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_delete_random_cfm_file_from_tablespace_dir(self): @@ -995,6 +1043,11 @@ def test_delete_random_cfm_file_from_tablespace_dir(self): "FROM generate_series(0,256) i".format('t1', tblspace_name) ) + self.node.safe_psql( + "postgres", + "CHECKPOINT" + ) + list_cmf = find_by_extensions( [self.get_tblspace_path(self.node, tblspace_name)], ['.cfm']) @@ -1044,6 +1097,11 @@ def test_delete_random_data_file_from_tablespace_dir(self): "FROM generate_series(0,256) i".format('t1', tblspace_name) ) + self.node.safe_psql( + "postgres", + "CHECKPOINT" + ) + list_data_files = find_by_pattern( [self.get_tblspace_path(self.node, tblspace_name)], '^.*/\d+$') @@ -1149,10 +1207,6 @@ def test_broken_file_pg_compression_into_tablespace_dir(self): ) # # --- End ---# -# @unittest.skipUnless(ProbackupTest.enterprise, 'skip') -# def tearDown(self): -# self.node.cleanup() -# self.del_test_dir(module_name, self.fname) #class CfsBackupEncTest(CfsBackupNoEncTest): diff --git a/tests/cfs_catchup_test.py b/tests/cfs_catchup_test.py new file mode 100644 index 000000000..f6760b72c --- /dev/null +++ b/tests/cfs_catchup_test.py @@ -0,0 +1,117 @@ +import os +import unittest +import random +import shutil + +from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file +from .helpers.ptrack_helpers import ProbackupTest, ProbackupException + + +class CfsCatchupNoEncTest(ProbackupTest, unittest.TestCase): + + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') + def test_full_catchup_with_tablespace(self): + """ + Test tablespace transfers + """ + # preparation + src_pg = self.make_simple_node( + base_dir = os.path.join(self.module_name, self.fname, 'src'), + set_replication = True + ) + src_pg.slow_start() + tblspace1_old_path = self.get_tblspace_path(src_pg, 'tblspace1_old') + self.create_tblspace_in_node(src_pg, 'tblspace1', tblspc_path = tblspace1_old_path, cfs=True) + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question TABLESPACE tblspace1 AS SELECT 42 AS answer") + src_query_result = src_pg.table_checksum("ultimate_question") + src_pg.safe_psql( + "postgres", + "CHECKPOINT") + + # do full catchup with tablespace mapping + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + tblspace1_new_path = self.get_tblspace_path(dst_pg, 'tblspace1_new') + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream', + '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) + ] + ) + + # 1st check: compare data directories + self.compare_pgdata( + self.pgdata_content(src_pg.data_dir), + self.pgdata_content(dst_pg.data_dir) + ) + + # check cfm size + cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) + + # make changes in master tablespace + src_pg.safe_psql( + "postgres", + "UPDATE ultimate_question SET answer = -1") + src_pg.safe_psql( + "postgres", + "CHECKPOINT") + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = 
str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + # 2nd check: run verification query + dst_query_result = dst_pg.table_checksum("ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') + + # and now delta backup + dst_pg.stop() + + self.catchup_node( + backup_mode = 'DELTA', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', + '-p', str(src_pg.port), + '--stream', + '-T', '{0}={1}'.format(tblspace1_old_path, tblspace1_new_path) + ] + ) + + # check cfm size again + cfms = find_by_extensions([os.path.join(dst_pg.data_dir)], ['.cfm']) + self.assertTrue(cfms, "ERROR: .cfm files not found in backup dir") + for cfm in cfms: + size = os.stat(cfm).st_size + self.assertLessEqual(size, 4096, + "ERROR: {0} is not truncated (has size {1} > 4096)".format( + cfm, size + )) + + # run&recover catchup'ed instance + dst_options = {} + dst_options['port'] = str(dst_pg.port) + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + + + # 3rd check: run verification query + src_query_result = src_pg.table_checksum("ultimate_question") + dst_query_result = dst_pg.table_checksum("ultimate_question") + self.assertEqual(src_query_result, dst_query_result, 'Different answer from copy') diff --git a/tests/cfs_restore.py b/tests/cfs_restore_test.py similarity index 91% rename from tests/cfs_restore.py rename to tests/cfs_restore_test.py index 07cf891aa..2fa35e71a 100644 --- a/tests/cfs_restore.py +++ b/tests/cfs_restore_test.py @@ -15,20 +15,17 @@ from .helpers.cfs_helpers import find_by_name from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - -module_name = 'cfs_restore' - tblspace_name = 'cfs_tblspace' tblspace_name_new = 'cfs_tblspace_new' class CfsRestoreBase(ProbackupTest, unittest.TestCase): + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def setUp(self): - self.fname = self.id().split('.')[3] - self.backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + self.backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, self.fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -60,14 +57,11 @@ def setUp(self): def add_data_in_cluster(self): pass - def tearDown(self): - self.node.cleanup() - self.del_test_dir(module_name, self.fname) - class CfsRestoreNoencEmptyTablespaceTest(CfsRestoreBase): # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_empty_tablespace_from_fullbackup(self): """ Case: Restore empty tablespace from valid full backup. 
@@ -102,7 +96,7 @@ def test_restore_empty_tablespace_from_fullbackup(self): tblspace = self.node.safe_psql( "postgres", "SELECT * FROM pg_tablespace WHERE spcname='{0}'".format(tblspace_name) - ) + ).decode("UTF-8") self.assertTrue( tblspace_name in tblspace and "compression=true" in tblspace, "ERROR: The tablespace not restored or it restored without compressions" @@ -118,14 +112,12 @@ def add_data_in_cluster(self): MD5(repeat(i::text,10))::tsvector AS tsvector \ FROM generate_series(0,1e5) i'.format('t1', tblspace_name) ) - self.table_t1 = self.node.safe_psql( - "postgres", - "SELECT * FROM t1" - ) + self.table_t1 = self.node.table_checksum("t1") # --- Restore from full backup ---# # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_old_location(self): """ Case: Restore instance from valid full backup to old location. @@ -159,12 +151,13 @@ def test_restore_from_fullbackup_to_old_location(self): ) self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_old_location_3_jobs(self): """ Case: Restore instance from valid full backup to old location. @@ -197,12 +190,13 @@ def test_restore_from_fullbackup_to_old_location_3_jobs(self): ) self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_new_location(self): """ Case: Restore instance from valid full backup to new location. @@ -211,7 +205,7 @@ def test_restore_from_fullbackup_to_new_location(self): self.node.cleanup() shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname)) + node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) node_new.cleanup() try: @@ -239,13 +233,14 @@ def test_restore_from_fullbackup_to_new_location(self): ) self.assertEqual( - repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + node_new.table_checksum("t1"), + self.table_t1 ) node_new.cleanup() # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_new_location_5_jobs(self): """ Case: Restore instance from valid full backup to new location. 
@@ -254,7 +249,7 @@ def test_restore_from_fullbackup_to_new_location_5_jobs(self): self.node.cleanup() shutil.rmtree(self.get_tblspace_path(self.node, tblspace_name)) - node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(module_name, self.fname)) + node_new = self.make_simple_node(base_dir="{0}/{1}/node_new_location".format(self.module_name, self.fname)) node_new.cleanup() try: @@ -282,13 +277,14 @@ def test_restore_from_fullbackup_to_new_location_5_jobs(self): ) self.assertEqual( - repr(node_new.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + node_new.table_checksum("t1"), + self.table_t1 ) node_new.cleanup() # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): self.node.stop() self.node.cleanup() @@ -329,12 +325,13 @@ def test_restore_from_fullbackup_to_old_location_tablespace_new_location(self): ) self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) # @unittest.expectedFailure # @unittest.skip("skip") + @unittest.skipUnless(ProbackupTest.enterprise, 'skip') def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs(self): self.node.stop() self.node.cleanup() @@ -375,8 +372,8 @@ def test_restore_from_fullbackup_to_old_location_tablespace_new_location_3_jobs( ) self.assertEqual( - repr(self.node.safe_psql("postgres", "SELECT * FROM %s" % 't1')), - repr(self.table_t1) + self.node.table_checksum("t1"), + self.table_t1 ) # @unittest.expectedFailure diff --git a/tests/cfs_validate_backup.py b/tests/cfs_validate_backup_test.py similarity index 94% rename from tests/cfs_validate_backup.py rename to tests/cfs_validate_backup_test.py index eea6f0e21..343020dfc 100644 --- a/tests/cfs_validate_backup.py +++ b/tests/cfs_validate_backup_test.py @@ -5,7 +5,6 @@ from .helpers.cfs_helpers import find_by_extensions, find_by_name, find_by_pattern, corrupt_file from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'cfs_validate_backup' tblspace_name = 'cfs_tblspace' diff --git a/tests/checkdb.py b/tests/checkdb_test.py similarity index 94% rename from tests/checkdb.py rename to tests/checkdb_test.py index 9b7adcd71..eb46aea19 100644 --- a/tests/checkdb.py +++ b/tests/checkdb_test.py @@ -9,18 +9,16 @@ import time -module_name = 'checkdb' - - class CheckdbTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_checkdb_amcheck_only_sanity(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums']) @@ -36,6 +34,17 @@ def test_checkdb_amcheck_only_sanity(self): node.safe_psql( "postgres", "create index on t_heap(id)") + + node.safe_psql( + "postgres", + "create table idxpart (a int) " + "partition by range (a)") + + # there aren't partitioned indexes on 10 and lesser versions + if self.get_version(node) >= 110000: + node.safe_psql( + "postgres", + "create index on idxpart(a)") try: node.safe_psql( @@ -122,7 +131,7 @@ def test_checkdb_amcheck_only_sanity(self): 
repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "ERROR: required parameter not specified: --instance", + "ERROR: Required parameter not specified: --instance", e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) @@ -212,15 +221,13 @@ def test_checkdb_amcheck_only_sanity(self): # Clean after yourself gdb.kill() node.stop() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_basic_checkdb_amcheck_only_sanity(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums']) @@ -351,18 +358,16 @@ def test_basic_checkdb_amcheck_only_sanity(self): # Clean after yourself node.stop() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_checkdb_block_validation_sanity(self): """make node, corrupt some pages, check that checkdb failed""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -392,7 +397,7 @@ def test_checkdb_block_validation_sanity(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "ERROR: required parameter not specified: PGDATA (-D, --pgdata)", + "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)", e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) @@ -448,14 +453,12 @@ def test_checkdb_block_validation_sanity(self): # Clean after yourself node.stop() - self.del_test_dir(module_name, fname) def test_checkdb_checkunique(self): """Test checkunique parameter of amcheck.bt_index_check function""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) node.slow_start() @@ -539,20 +542,15 @@ def test_checkdb_checkunique(self): # Clean after yourself node.stop() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_checkdb_sigint_handling(self): """""" - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -597,15 +595,13 @@ def test_checkdb_sigint_handling(self): # Clean after yourself gdb.kill() node.stop() - 
self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_checkdb_with_least_privileges(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -667,8 +663,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' # amcheck-next function - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') # amcheck-next function + # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -697,8 +693,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' # 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') + # PG 10 elif self.get_version(node) > 100000 and self.get_version(node) < 110000: node.safe_psql( @@ -726,7 +722,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup;' - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup;') + if ProbackupTest.enterprise: # amcheck-1.1 node.safe_psql( @@ -765,8 +762,8 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') + # checkunique parameter if ProbackupTest.enterprise: if (self.get_version(node) >= 111300 and self.get_version(node) < 120000 @@ -803,18 +800,19 @@ def test_checkdb_with_least_privileges(self): 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anycompatiblearray, anycompatible) TO backup; ' 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - ) + 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;') + # checkunique parameter if ProbackupTest.enterprise: node.safe_psql( "backupdb", "GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool, bool) TO backup") - if ProbackupTest.enterprise: + if ProbackupTest.pgpro: node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") + 'backupdb', + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; ' + 'GRANT EXECUTE ON 
FUNCTION pg_catalog.pgpro_edition() TO backup;') # checkdb try: @@ -851,4 +849,3 @@ def test_checkdb_with_least_privileges(self): # Clean after yourself node.stop() - self.del_test_dir(module_name, fname) diff --git a/tests/compatibility.py b/tests/compatibility_test.py similarity index 86% rename from tests/compatibility.py rename to tests/compatibility_test.py index e274c22be..7ae8baf9f 100644 --- a/tests/compatibility.py +++ b/tests/compatibility_test.py @@ -5,19 +5,97 @@ from sys import exit import shutil -module_name = 'compatibility' + +def check_manual_tests_enabled(): + return 'PGPROBACKUP_MANUAL' in os.environ and os.environ['PGPROBACKUP_MANUAL'] == 'ON' + + +def check_ssh_agent_path_exists(): + return 'PGPROBACKUP_SSH_AGENT_PATH' in os.environ + + +class CrossCompatibilityTest(ProbackupTest, unittest.TestCase): + @unittest.skipUnless(check_manual_tests_enabled(), 'skip manual test') + @unittest.skipUnless(check_ssh_agent_path_exists(), 'skip: no ssh agent path exists') + # @unittest.skip("skip") + def test_catchup_with_different_remote_major_pg(self): + """ + Description in jira issue PBCKP-236 + This test exposes the ticket error using pg_probackup builds for both PGPROEE11 and PGPROEE9_6 + + Prerequisites: + - pg_probackup git tag for PBCKP 2.5.1 + - master pg_probackup build should be made for PGPROEE11 + - agent pg_probackup build should be made for PGPROEE9_6 + + Calling the PGPROEE9_6 pg_probackup agent from the PGPROEE11 pg_probackup master for a DELTA backup causes + the PBCKP-236 problem + + Please give env variables PGPROBACKUP_MANUAL=ON;PGPROBACKUP_SSH_AGENT_PATH= + for the test + + Please make the path for the agent's pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.ee.9.6/bin/' + without the pg_probackup executable + """ + + self.verbose = True + self.remote = True + # please use your own local path like + # pgprobackup_ssh_agent_path = '/home/avaness/postgres/postgres.build.clean/bin/' + pgprobackup_ssh_agent_path = os.environ['PGPROBACKUP_SSH_AGENT_PATH'] + + src_pg = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'src'), + set_replication=True, + ) + src_pg.slow_start() + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question AS SELECT 42 AS answer") + + # do full catchup + dst_pg = self.make_empty_node(os.path.join(self.module_name, self.fname, 'dst')) + self.catchup_node( + backup_mode='FULL', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream'] + ) + + dst_options = {'port': str(dst_pg.port)} + self.set_auto_conf(dst_pg, dst_options) + dst_pg.slow_start() + dst_pg.stop() + + src_pg.safe_psql( + "postgres", + "CREATE TABLE ultimate_question2 AS SELECT 42 AS answer") + + # do delta catchup with remote pg_probackup agent with another postgres major version + # this DELTA backup should fail without PBCKP-236 patch.
+ self.catchup_node( + backup_mode='DELTA', + source_pgdata=src_pg.data_dir, + destination_node=dst_pg, + # here --remote-path points to a pg_probackup agent compiled with another postgres version + options=['-d', 'postgres', '-p', str(src_pg.port), '--stream', '--remote-path=' + pgprobackup_ssh_agent_path] + ) class CompatibilityTest(ProbackupTest, unittest.TestCase): + def setUp(self): + super().setUp() + if not self.probackup_old_path: + self.skipTest('PGPROBACKUPBIN_OLD is not set') + # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_page(self): """Description in jira issue PGPRO-434""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -45,7 +123,7 @@ def test_backward_compatibility_page(self): # RESTORE old FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -142,17 +220,13 @@ def test_backward_compatibility_page(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_delta(self): """Description in jira issue PGPRO-434""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -180,7 +254,7 @@ def test_backward_compatibility_delta(self): # RESTORE old FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -276,21 +350,17 @@ def test_backward_compatibility_delta(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_ptrack(self): """Description in jira issue PGPRO-434""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -323,7 +393,7 @@ def test_backward_compatibility_ptrack(self): # RESTORE old FULL with new binary node_restored = self.make_simple_node( -
base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -390,17 +460,13 @@ def test_backward_compatibility_ptrack(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_compression(self): """Description in jira issue PGPRO-434""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -423,7 +489,7 @@ def test_backward_compatibility_compression(self): # restore OLD FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -549,9 +615,6 @@ def test_backward_compatibility_compression(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge(self): @@ -559,10 +622,9 @@ def test_backward_compatibility_merge(self): Create node, take FULL and PAGE backups with old binary, merge them with new binary """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -593,7 +655,7 @@ def test_backward_compatibility_merge(self): # restore OLD FULL with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -604,9 +666,6 @@ def test_backward_compatibility_merge(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_1(self): @@ -615,10 +674,9 @@ def test_backward_compatibility_merge_1(self): merge them with new binary. 
old binary version =< 2.2.7 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -668,7 +726,7 @@ def test_backward_compatibility_merge_1(self): # restore merged backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -676,9 +734,6 @@ def test_backward_compatibility_merge_1(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_2(self): @@ -687,10 +742,9 @@ def test_backward_compatibility_merge_2(self): merge them with new binary. old binary version =< 2.2.7 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -707,7 +761,7 @@ def test_backward_compatibility_merge_2(self): 'VACUUM pgbench_accounts') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # FULL backup with OLD binary self.backup_node(backup_dir, 'node', node, old_binary=True) @@ -798,9 +852,6 @@ def test_backward_compatibility_merge_2(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata4, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_3(self): @@ -809,10 +860,9 @@ def test_backward_compatibility_merge_3(self): merge them with new binary. 
old binary version =< 2.2.7 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -829,7 +879,7 @@ def test_backward_compatibility_merge_3(self): 'VACUUM pgbench_accounts') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # FULL backup with OLD binary self.backup_node( @@ -921,9 +971,6 @@ def test_backward_compatibility_merge_3(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata4, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_4(self): @@ -935,10 +982,9 @@ def test_backward_compatibility_merge_4(self): self.assertTrue( False, 'You need pg_probackup old_binary =< 2.4.0 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -955,7 +1001,7 @@ def test_backward_compatibility_merge_4(self): 'VACUUM pgbench_accounts') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # FULL backup with OLD binary self.backup_node( @@ -998,9 +1044,6 @@ def test_backward_compatibility_merge_4(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_backward_compatibility_merge_5(self): @@ -1017,10 +1060,9 @@ def test_backward_compatibility_merge_5(self): self.version_to_num(self.old_probackup_version), self.version_to_num(self.probackup_version)) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1070,7 +1112,7 @@ def test_backward_compatibility_merge_5(self): # restore merged backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -1078,9 +1120,6 @@ def test_backward_compatibility_merge_5(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_vacuum_truncate(self): """ @@ -1092,10 +1131,9 @@ def test_page_vacuum_truncate(self): and check data 
correctness old binary should be 2.2.x version """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1143,7 +1181,7 @@ def test_page_vacuum_truncate(self): pgdata3 = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1182,9 +1220,6 @@ def test_page_vacuum_truncate(self): node_restored.slow_start() node_restored.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_vacuum_truncate_compression(self): """ @@ -1196,10 +1231,9 @@ def test_page_vacuum_truncate_compression(self): and check data correctness old binary should be 2.2.x version """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1249,7 +1283,7 @@ def test_page_vacuum_truncate_compression(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -1261,9 +1295,6 @@ def test_page_vacuum_truncate_compression(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_vacuum_truncate_compressed_1(self): """ @@ -1275,10 +1306,9 @@ def test_page_vacuum_truncate_compressed_1(self): and check data correctness old binary should be 2.2.x version """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1330,7 +1360,7 @@ def test_page_vacuum_truncate_compressed_1(self): pgdata3 = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1369,9 +1399,6 @@ def test_page_vacuum_truncate_compressed_1(self): node_restored.slow_start() node_restored.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_hidden_files(self): """ @@ -1380,10 +1407,9 @@ def test_hidden_files(self): with old binary, then try to delete backup with new binary """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 
'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1398,21 +1424,17 @@ def test_hidden_files(self): self.delete_pb(backup_dir, 'node', backup_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") + # @unittest.skip("skip") def test_compatibility_tablespace(self): """ https://github.com/postgrespro/pg_probackup/issues/348 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node, old_binary=True) @@ -1438,7 +1460,7 @@ def test_compatibility_tablespace(self): tblspace_new_path = self.get_tblspace_path(node, 'tblspace_new') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -1479,6 +1501,3 @@ def test_compatibility_tablespace(self): if self.paranoia: pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/compression.py b/tests/compression_test.py similarity index 83% rename from tests/compression.py rename to tests/compression_test.py index c10a59489..55924b9d2 100644 --- a/tests/compression.py +++ b/tests/compression_test.py @@ -5,9 +5,6 @@ import subprocess -module_name = 'compression' - - class CompressionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -18,10 +15,9 @@ def test_basic_compression_stream_zlib(self): check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -36,7 +32,7 @@ def test_basic_compression_stream_zlib(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,256) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=[ @@ -49,7 +45,7 @@ def test_basic_compression_stream_zlib(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=[ @@ -61,7 +57,7 @@ def test_basic_compression_stream_zlib(self): "insert into t_heap select i as id, md5(i::text) as text, " 
"md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,768) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--stream', '--compress-algorithm=zlib']) @@ -81,7 +77,7 @@ def test_basic_compression_stream_zlib(self): repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -97,7 +93,7 @@ def test_basic_compression_stream_zlib(self): repr(self.output), self.cmd)) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -113,22 +109,18 @@ def test_basic_compression_stream_zlib(self): repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_compression_archive_zlib(self): """ make archive node, make full and page backups, check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -142,7 +134,7 @@ def test_compression_archive_zlib(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=["--compress-algorithm=zlib"]) @@ -153,7 +145,7 @@ def test_compression_archive_zlib(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(0,2) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=["--compress-algorithm=zlib"]) @@ -163,7 +155,7 @@ def test_compression_archive_zlib(self): "postgres", "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,3) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--compress-algorithm=zlib']) @@ -183,7 +175,7 @@ def test_compression_archive_zlib(self): repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -199,7 +191,7 @@ def test_compression_archive_zlib(self): repr(self.output), self.cmd)) node.slow_start() - page_result_new = 
node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -215,23 +207,19 @@ def test_compression_archive_zlib(self): repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_compression_stream_pglz(self): """ make archive node, make full and page stream backups, check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -246,7 +234,7 @@ def test_compression_stream_pglz(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,256) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=['--stream', '--compress-algorithm=pglz']) @@ -257,7 +245,7 @@ def test_compression_stream_pglz(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=['--stream', '--compress-algorithm=pglz']) @@ -268,7 +256,7 @@ def test_compression_stream_pglz(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,768) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--stream', '--compress-algorithm=pglz']) @@ -288,7 +276,7 @@ def test_compression_stream_pglz(self): repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -304,7 +292,7 @@ def test_compression_stream_pglz(self): repr(self.output), self.cmd)) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -320,23 +308,19 @@ def test_compression_stream_pglz(self): repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_compression_archive_pglz(self): """ make archive node, make full and page backups, check data correctness in restored instance """ self.maxDiff = None - fname = 
self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -351,7 +335,7 @@ def test_compression_archive_pglz(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=['--compress-algorithm=pglz']) @@ -362,7 +346,7 @@ def test_compression_archive_pglz(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=['--compress-algorithm=pglz']) @@ -373,7 +357,7 @@ def test_compression_archive_pglz(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(200,300) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--compress-algorithm=pglz']) @@ -393,7 +377,7 @@ def test_compression_archive_pglz(self): repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -409,7 +393,7 @@ def test_compression_archive_pglz(self): repr(self.output), self.cmd)) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() @@ -425,23 +409,19 @@ def test_compression_archive_pglz(self): repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_compression_wrong_algorithm(self): """ make archive node, make full and page backups, check data correctness in restored instance """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -463,13 +443,10 @@ def test_compression_wrong_algorithm(self): except ProbackupException as e: self.assertEqual( e.message, - 'ERROR: invalid compress algorithm value "bla-blah"\n', + 'ERROR: Invalid compress algorithm value "bla-blah"\n', '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) 
- # @unittest.skip("skip") def test_incompressible_pages(self): """ @@ -477,10 +454,9 @@ def test_incompressible_pages(self): take backup with compression, make sure that page was not compressed, restore backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -517,6 +493,3 @@ def test_incompressible_pages(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/config.py b/tests/config_test.py similarity index 90% rename from tests/config.py rename to tests/config_test.py index b41382204..b1a0f9295 100644 --- a/tests/config.py +++ b/tests/config_test.py @@ -5,19 +5,16 @@ from sys import exit from shutil import copyfile -module_name = 'config' class ConfigTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure # @unittest.skip("skip") def test_remove_instance_config(self): - """remove pg_probackup.conf""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + """remove pg_probackup.conf""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -57,10 +54,9 @@ def test_remove_instance_config(self): # @unittest.skip("skip") def test_corrupt_backup_content(self): """corrupt backup_content.control""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) diff --git a/tests/delete.py b/tests/delete_test.py similarity index 88% rename from tests/delete.py rename to tests/delete_test.py index 345a70284..10100887d 100644 --- a/tests/delete.py +++ b/tests/delete_test.py @@ -2,10 +2,6 @@ import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import subprocess -from sys import exit - - -module_name = 'delete' class DeleteTest(ProbackupTest, unittest.TestCase): @@ -14,12 +10,11 @@ class DeleteTest(ProbackupTest, unittest.TestCase): # @unittest.expectedFailure def test_delete_full_backups(self): """delete full backups""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -51,19 +46,15 @@ def test_delete_full_backups(self): self.assertEqual(show_backups[0]['id'], id_1) self.assertEqual(show_backups[1]['id'], id_3) - # Clean after yourself -
self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_del_instance_archive(self): """delete full backups""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -83,19 +74,15 @@ def test_del_instance_archive(self): # Delete instance self.del_instance(backup_dir, 'node') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_delete_archive_mix_compress_and_non_compressed_segments(self): """delete full backups""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving( @@ -142,18 +129,14 @@ def test_delete_archive_mix_compress_and_non_compressed_segments(self): '--retention-redundancy=3', '--delete-expired']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_increment_page(self): """delete increment and all after him""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -182,22 +165,18 @@ def test_delete_increment_page(self): self.assertEqual(show_backups[1]['backup-mode'], "FULL") self.assertEqual(show_backups[1]['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_increment_ptrack(self): """delete increment and all after him""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -230,9 +209,6 @@ def test_delete_increment_ptrack(self): self.assertEqual(show_backups[1]['backup-mode'], "FULL") self.assertEqual(show_backups[1]['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_orphaned_wal_segments(self): """ @@ 
-240,12 +216,11 @@ def test_delete_orphaned_wal_segments(self): delete second backup without --wal option, then delete orphaned wals via --wal option """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -302,9 +277,6 @@ def test_delete_orphaned_wal_segments(self): wals = [f for f in os.listdir(wals_dir) if os.path.isfile(os.path.join(wals_dir, f))] self.assertEqual (0, len(wals), "Number of wals should be equal to 0") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_wal_between_multiple_timelines(self): """ @@ -315,12 +287,11 @@ def test_delete_wal_between_multiple_timelines(self): [A1, B1) are deleted and backups B1 and A2 keep their WAL """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -332,7 +303,7 @@ def test_delete_wal_between_multiple_timelines(self): node.pgbench_init(scale=3) node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() self.restore_node(backup_dir, 'node', node2) @@ -356,22 +327,18 @@ def test_delete_wal_between_multiple_timelines(self): self.validate_pb(backup_dir) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_backup_with_empty_control_file(self): """ take backup, truncate its control file, try to delete it via 'delete' command """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -397,18 +364,14 @@ def test_delete_backup_with_empty_control_file(self): self.delete_pb(backup_dir, 'node', backup_id=backup_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) 
self.set_archiving(backup_dir, 'node', node) @@ -521,9 +484,6 @@ def test_delete_interleaved_incremental_chains(self): print(self.show_pb( backup_dir, 'node', as_json=False, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_multiple_descendants(self): """ @@ -536,12 +496,11 @@ def test_delete_multiple_descendants(self): FULLb | FULLa should be deleted """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -693,9 +652,6 @@ def test_delete_multiple_descendants(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delete_multiple_descendants_dry_run(self): """ @@ -706,12 +662,11 @@ def test_delete_multiple_descendants_dry_run(self): | FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -798,17 +753,13 @@ def test_delete_multiple_descendants_dry_run(self): self.validate_pb(backup_dir, 'node') - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_delete_error_backups(self): """delete increment and all after him""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -869,6 +820,3 @@ def test_delete_error_backups(self): self.assertEqual(show_backups[1]['status'], "OK") self.assertEqual(show_backups[2]['status'], "OK") self.assertEqual(show_backups[3]['status'], "OK") - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/delta.py b/tests/delta_test.py similarity index 85% rename from tests/delta.py rename to tests/delta_test.py index f365b6f9b..8736a079c 100644 --- a/tests/delta.py +++ b/tests/delta_test.py @@ -8,9 +8,6 @@ from threading import Thread -module_name = 'delta' - - class DeltaTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -21,15 +18,14 @@ def test_basic_delta_vacuum_truncate(self): take delta backup, take second delta backup, restore latest delta backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - 
base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -77,9 +73,6 @@ def test_basic_delta_vacuum_truncate(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_vacuum_truncate_1(self): """ @@ -88,15 +81,14 @@ def test_delta_vacuum_truncate_1(self): take delta backup, take second delta backup, restore latest delta backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), ) self.init_pb(backup_dir) @@ -161,9 +153,6 @@ def test_delta_vacuum_truncate_1(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_vacuum_truncate_2(self): """ @@ -172,15 +161,14 @@ def test_delta_vacuum_truncate_2(self): take delta backup, take second delta backup, restore latest delta backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), ) self.init_pb(backup_dir) @@ -223,19 +211,15 @@ def test_delta_vacuum_truncate_2(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_stream(self): """ make archive node, take full and delta stream backups, restore them and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -255,7 +239,7 @@ def test_delta_stream(self): "md5(i::text)::tsvector as tsvector " "from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = 
self.backup_node( backup_dir, 'node', node, backup_type='full', options=['--stream']) @@ -266,7 +250,7 @@ def test_delta_stream(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta', options=['--stream']) @@ -286,7 +270,7 @@ def test_delta_stream(self): '\n Unexpected Error Message: {0}\n' ' CMD: {1}'.format(repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -302,13 +286,10 @@ def test_delta_stream(self): '\n Unexpected Error Message: {0}\n' ' CMD: {1}'.format(repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_archive(self): """ @@ -316,10 +297,9 @@ def test_delta_archive(self): restore them and check data correctness """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -333,7 +313,7 @@ def test_delta_archive(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,1) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full') @@ -342,7 +322,7 @@ def test_delta_archive(self): "postgres", "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,2) i") - delta_result = node.execute("postgres", "SELECT * FROM t_heap") + delta_result = node.table_checksum("t_heap") delta_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='delta') @@ -361,7 +341,7 @@ def test_delta_archive(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -377,23 +357,19 @@ def test_delta_archive(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) node.slow_start() - delta_result_new = node.execute("postgres", "SELECT * FROM t_heap") + delta_result_new = node.table_checksum("t_heap") self.assertEqual(delta_result, delta_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_multiple_segments(self): """ Make node, create table with multiple segments, write some data to it, check delta and data correctness """ - fname = self.id().split('.')[3] - backup_dir = 
os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -424,7 +400,7 @@ def test_delta_multiple_segments(self): node.safe_psql("postgres", "checkpoint") # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select count(*) from pgbench_accounts") + result = node.table_checksum("pgbench_accounts") # delta BACKUP self.backup_node( backup_dir, 'node', node, @@ -434,7 +410,7 @@ def test_delta_multiple_segments(self): # RESTORE NODE restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'restored_node')) + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( @@ -453,9 +429,7 @@ def test_delta_multiple_segments(self): self.set_auto_conf(restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.safe_psql( - "postgres", - "select count(*) from pgbench_accounts") + result_new = restored_node.table_checksum("pgbench_accounts") # COMPARE RESTORED FILES self.assertEqual(result, result_new, 'data is lost') @@ -463,29 +437,22 @@ def test_delta_multiple_segments(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_vacuum_full(self): """ make node, make full and delta stream backups, restore them and check data correctness """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.init_pb(backup_dir) @@ -545,19 +512,15 @@ def test_delta_vacuum_full(self): node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_create_db(self): """ Make node, take full backup, create database db1, take delta backup, restore database and check it presense """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -575,7 +538,7 @@ def test_create_db(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") 
self.backup_node( backup_dir, 'node', node, options=["--stream"]) @@ -599,7 +562,7 @@ def test_create_db(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -670,19 +633,15 @@ def test_create_db(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_exists_in_previous_backup(self): """ Make node, take full backup, create table, take page backup, take delta backup, check that file is no fully copied to delta backup """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -702,7 +661,7 @@ def test_exists_in_previous_backup(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") filepath = node.safe_psql( "postgres", "SELECT pg_relation_filepath('t_heap')").decode('utf-8').rstrip() @@ -753,7 +712,7 @@ def test_exists_in_previous_backup(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -776,19 +735,15 @@ def test_exists_in_previous_backup(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_alter_table_set_tablespace_delta(self): """ Make node, create tablespace with table, take full backup, alter tablespace location, take delta backup, restore database. 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -817,8 +772,7 @@ def test_alter_table_set_tablespace_delta(self): "alter table t_heap set tablespace somedata_new") # DELTA BACKUP - result = node.safe_psql( - "postgres", "select * from t_heap") + result = node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node, backup_type='delta', @@ -829,7 +783,7 @@ def test_alter_table_set_tablespace_delta(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -856,14 +810,10 @@ def test_alter_table_set_tablespace_delta(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.safe_psql( - "postgres", "select * from t_heap") + result_new = node_restored.table_checksum("t_heap") self.assertEqual(result, result_new, 'lost some data after restore') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_alter_database_set_tablespace_delta(self): """ @@ -871,10 +821,9 @@ def test_alter_database_set_tablespace_delta(self): take delta backup, alter database tablespace location, take delta backup restore last delta backup. """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) @@ -922,7 +871,7 @@ def test_alter_database_set_tablespace_delta(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -950,19 +899,15 @@ def test_alter_database_set_tablespace_delta(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_delta_delete(self): """ Make node, create tablespace with table, take full backup, alter tablespace location, take delta backup, restore database. 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -1008,7 +953,7 @@ def test_delta_delete(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -1032,20 +977,16 @@ def test_delta_delete(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_delta_nullified_heap_page_backup(self): """ make node, take full backup, nullify some heap block, take delta backup, restore, physically compare pgdata`s """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1096,7 +1037,7 @@ def test_delta_nullified_heap_page_backup(self): # Restore DELTA backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1106,21 +1047,17 @@ def test_delta_nullified_heap_page_backup(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_delta_backup_from_past(self): """ make node, take FULL stream backup, take DELTA stream backup, restore FULL backup, try to take second DELTA stream backup """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1161,22 +1098,18 @@ def test_delta_backup_from_past(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_delta_pg_resetxlog(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1254,7 +1187,7 @@ def test_delta_pg_resetxlog(self): # pgdata = self.pgdata_content(node.data_dir) # # node_restored = self.make_simple_node( -# base_dir=os.path.join(module_name, fname, 'node_restored')) +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # node_restored.cleanup() # # self.restore_node( @@ -1262,6 +1195,3 @@ def test_delta_pg_resetxlog(self): # # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/exclude.py b/tests/exclude_test.py similarity index 84% rename from tests/exclude.py rename to tests/exclude_test.py index b98a483d0..cb3530cd5 100644 --- a/tests/exclude.py +++ b/tests/exclude_test.py @@ -3,19 +3,15 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'exclude' - - class ExcludeTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_exclude_temp_files(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -53,9 +49,6 @@ def test_exclude_temp_files(self): # TODO check temporary tablespaces - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_exclude_temp_tables(self): @@ -63,10 +56,9 @@ def test_exclude_temp_tables(self): make node without archiving, create temp table, take full backup, check that temp table not present in backup catalogue """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -139,9 +131,6 @@ def test_exclude_temp_tables(self): "Found temp table file in backup catalogue.\n " "Filepath: {0}".format(file)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_exclude_unlogged_tables_1(self): """ @@ -149,10 +138,9 @@ def test_exclude_unlogged_tables_1(self): alter table to unlogged, take delta backup, restore delta backup, check that PGDATA`s are physically the same """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -186,7 +174,7 @@ def test_exclude_unlogged_tables_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -197,19 +185,17 @@ def test_exclude_unlogged_tables_1(self): pgdata_restored = 
self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_exclude_unlogged_tables_2(self): """ - make node, create unlogged, take FULL, check - that unlogged was not backed up + 1. make node, create unlogged, take FULL, DELTA, PAGE, + check that unlogged table files were not backed up + 2. restore FULL, DELTA, PAGE to empty db, + ensure unlogged table exists and is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -220,6 +206,8 @@ def test_exclude_unlogged_tables_2(self): self.set_archiving(backup_dir, 'node', node) node.slow_start() + backup_ids = [] + for backup_type in ['full', 'delta', 'page']: if backup_type == 'full': @@ -231,14 +219,16 @@ def test_exclude_unlogged_tables_2(self): 'postgres', 'insert into test select generate_series(0,20050000)::text') - rel_path = node.safe_psql( + rel_path = node.execute( 'postgres', - "select pg_relation_filepath('test')").decode('utf-8').rstrip() + "select pg_relation_filepath('test')")[0][0] backup_id = self.backup_node( backup_dir, 'node', node, backup_type=backup_type, options=['--stream']) + backup_ids.append(backup_id) + filelist = self.get_backup_filelist( backup_dir, 'node', backup_id) @@ -258,18 +248,29 @@ def test_exclude_unlogged_tables_2(self): rel_path + '.3', filelist, "Unlogged table was not excluded") - # Clean after yourself - self.del_test_dir(module_name, fname) + # ensure restoring retrieves back only empty unlogged table + for backup_id in backup_ids: + node.stop() + node.cleanup() + + self.restore_node(backup_dir, 'node', node, backup_id=backup_id) + + node.slow_start() + + self.assertEqual( + node.execute( + 'postgres', + 'select count(*) from test')[0][0], + 0) # @unittest.skip("skip") def test_exclude_log_dir(self): """ check that by default 'log' and 'pg_log' directories are not backed up """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -299,18 +300,14 @@ def test_exclude_log_dir(self): self.assertTrue(os.path.exists(path)) self.assertFalse(os.path.exists(log_file)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_exclude_log_dir_1(self): """ check that "--backup-pg-log" works correctly """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -339,6 +336,3 @@ def test_exclude_log_dir_1(self): log_file = os.path.join(path, 'postgresql.log')
self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(log_file)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/expected/option_help.out b/tests/expected/option_help.out index a8b4a64b3..f0c77ae16 100644 --- a/tests/expected/option_help.out +++ b/tests/expected/option_help.out @@ -5,13 +5,14 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. pg_probackup version - pg_probackup init -B backup-path + pg_probackup init -B backup-dir - pg_probackup set-config -B backup-path --instance=instance_name + pg_probackup set-config -B backup-dir --instance=instance-name [-D pgdata-path] [--external-dirs=external-directories-paths] [--log-level-console=log-level-console] [--log-level-file=log-level-file] + [--log-format-file=log-format-file] [--log-filename=log-filename] [--error-log-filename=error-log-filename] [--log-directory=log-directory] @@ -31,16 +32,17 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--archive-port=port] [--archive-user=username] [--help] - pg_probackup set-backup -B backup-path --instance=instance_name + pg_probackup set-backup -B backup-dir --instance=instance-name -i backup-id [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup show-config -B backup-path --instance=instance_name + pg_probackup show-config -B backup-dir --instance=instance-name [--format=format] + [--no-scale-units] [--help] - pg_probackup backup -B backup-path -b backup-mode --instance=instance_name + pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name [-D pgdata-path] [-C] [--stream [-S slot-name] [--temp-slot]] [--backup-pg-log] [-j num-threads] [--progress] @@ -49,6 +51,8 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--no-sync] [--log-level-console=log-level-console] [--log-level-file=log-level-file] + [--log-format-console=log-format-console] + [--log-format-file=log-format-file] [--log-filename=log-filename] [--error-log-filename=error-log-filename] [--log-directory=log-directory] @@ -70,7 +74,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--ttl=interval] [--expire-time=timestamp] [--note=text] [--help] - pg_probackup restore -B backup-path --instance=instance_name + pg_probackup restore -B backup-dir --instance=instance-name [-D pgdata-path] [-i backup-id] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -86,8 +90,10 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [-T OLDDIR=NEWDIR] [--progress] [--external-mapping=OLDDIR=NEWDIR] [--skip-external-dirs] [--no-sync] + [-X WALDIR | --waldir=WALDIR] [-I | --incremental-mode=none|checksum|lsn] [--db-include | --db-exclude] + [--destroy-all-other-dbs] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] @@ -95,7 +101,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--archive-port=port] [--archive-user=username] [--help] - pg_probackup validate -B backup-path [--instance=instance_name] + pg_probackup validate -B backup-dir [--instance=instance-name] [-i backup-id] [--progress] [-j num-threads] [--recovery-target-time=time|--recovery-target-xid=xid |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] @@ -104,18 +110,18 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. 
[--skip-block-validation] [--help] - pg_probackup checkdb [-B backup-path] [--instance=instance_name] + pg_probackup checkdb [-B backup-dir] [--instance=instance-name] [-D pgdata-path] [--progress] [-j num-threads] [--amcheck] [--skip-block-validation] [--heapallindexed] [--checkunique] [--help] - pg_probackup show -B backup-path - [--instance=instance_name [-i backup-id]] + pg_probackup show -B backup-dir + [--instance=instance-name [-i backup-id]] [--format=format] [--archive] [--no-color] [--help] - pg_probackup delete -B backup-path --instance=instance_name + pg_probackup delete -B backup-dir --instance=instance-name [-j num-threads] [--progress] [--retention-redundancy=retention-redundancy] [--retention-window=retention-window] @@ -125,24 +131,24 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--dry-run] [--no-validate] [--no-sync] [--help] - pg_probackup merge -B backup-path --instance=instance_name + pg_probackup merge -B backup-dir --instance=instance-name -i backup-id [--progress] [-j num-threads] [--no-validate] [--no-sync] [--help] - pg_probackup add-instance -B backup-path -D pgdata-path - --instance=instance_name + pg_probackup add-instance -B backup-dir -D pgdata-path + --instance=instance-name [--external-dirs=external-directories-paths] [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] [--help] - pg_probackup del-instance -B backup-path - --instance=instance_name + pg_probackup del-instance -B backup-dir + --instance=instance-name [--help] - pg_probackup archive-push -B backup-path --instance=instance_name + pg_probackup archive-push -B backup-dir --instance=instance-name --wal-file-name=wal-file-name [--wal-file-path=wal-file-path] [-j num-threads] [--batch-size=batch_size] @@ -156,7 +162,7 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--ssh-options] [--help] - pg_probackup archive-get -B backup-path --instance=instance_name + pg_probackup archive-get -B backup-dir --instance=instance-name --wal-file-path=wal-file-path --wal-file-name=wal-file-name [-j num-threads] [--batch-size=batch_size] @@ -178,7 +184,8 @@ pg_probackup - utility to manage backup/recovery of PostgreSQL database. [--remote-proto] [--remote-host] [--remote-port] [--remote-path] [--remote-user] [--ssh-options] + [--dry-run] [--help] -Read the website for details. +Read the website for details . Report bugs to . diff --git a/tests/expected/option_help_ru.out b/tests/expected/option_help_ru.out new file mode 100644 index 000000000..bd6d76970 --- /dev/null +++ b/tests/expected/option_help_ru.out @@ -0,0 +1,191 @@ + +pg_probackup - утилита для управления резервным копированием/восстановлением базы данных PostgreSQL. 
+ + pg_probackup help [COMMAND] + + pg_probackup version + + pg_probackup init -B backup-dir + + pg_probackup set-config -B backup-dir --instance=instance-name + [-D pgdata-path] + [--external-dirs=external-directories-paths] + [--log-level-console=log-level-console] + [--log-level-file=log-level-file] + [--log-format-file=log-format-file] + [--log-filename=log-filename] + [--error-log-filename=error-log-filename] + [--log-directory=log-directory] + [--log-rotation-size=log-rotation-size] + [--log-rotation-age=log-rotation-age] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--archive-timeout=timeout] + [-d dbname] [-h host] [-p port] [-U username] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--restore-command=cmdline] [--archive-host=destination] + [--archive-port=port] [--archive-user=username] + [--help] + + pg_probackup set-backup -B backup-dir --instance=instance-name + -i backup-id [--ttl=interval] [--expire-time=timestamp] + [--note=text] + [--help] + + pg_probackup show-config -B backup-dir --instance=instance-name + [--format=format] + [--no-scale-units] + [--help] + + pg_probackup backup -B backup-dir -b backup-mode --instance=instance-name + [-D pgdata-path] [-C] + [--stream [-S slot-name] [--temp-slot]] + [--backup-pg-log] [-j num-threads] [--progress] + [--no-validate] [--skip-block-validation] + [--external-dirs=external-directories-paths] + [--no-sync] + [--log-level-console=log-level-console] + [--log-level-file=log-level-file] + [--log-format-console=log-format-console] + [--log-format-file=log-format-file] + [--log-filename=log-filename] + [--error-log-filename=error-log-filename] + [--log-directory=log-directory] + [--log-rotation-size=log-rotation-size] + [--log-rotation-age=log-rotation-age] [--no-color] + [--delete-expired] [--delete-wal] [--merge-expired] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [--compress] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--archive-timeout=archive-timeout] + [-d dbname] [-h host] [-p port] [-U username] + [-w --no-password] [-W --password] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--ttl=interval] [--expire-time=timestamp] [--note=text] + [--help] + + pg_probackup restore -B backup-dir --instance=instance-name + [-D pgdata-path] [-i backup-id] [-j num-threads] + [--recovery-target-time=time|--recovery-target-xid=xid + |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] + [--recovery-target-timeline=timeline] + [--recovery-target=immediate|latest] + [--recovery-target-name=target-name] + [--recovery-target-action=pause|promote|shutdown] + [--restore-command=cmdline] + [-R | --restore-as-replica] [--force] + [--primary-conninfo=primary_conninfo] + [-S | --primary-slot-name=slotname] + [--no-validate] [--skip-block-validation] + [-T OLDDIR=NEWDIR] [--progress] + [--external-mapping=OLDDIR=NEWDIR] + [--skip-external-dirs] [--no-sync] + [-X WALDIR | --waldir=WALDIR] + [-I | --incremental-mode=none|checksum|lsn] + [--db-include | --db-exclude] + [--destroy-all-other-dbs] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--archive-host=hostname] + [--archive-port=port] [--archive-user=username] + 
[--help] + + pg_probackup validate -B backup-dir [--instance=instance-name] + [-i backup-id] [--progress] [-j num-threads] + [--recovery-target-time=time|--recovery-target-xid=xid + |--recovery-target-lsn=lsn [--recovery-target-inclusive=boolean]] + [--recovery-target-timeline=timeline] + [--recovery-target-name=target-name] + [--skip-block-validation] + [--help] + + pg_probackup checkdb [-B backup-dir] [--instance=instance-name] + [-D pgdata-path] [--progress] [-j num-threads] + [--amcheck] [--skip-block-validation] + [--heapallindexed] [--checkunique] + [--help] + + pg_probackup show -B backup-dir + [--instance=instance-name [-i backup-id]] + [--format=format] [--archive] + [--no-color] [--help] + + pg_probackup delete -B backup-dir --instance=instance-name + [-j num-threads] [--progress] + [--retention-redundancy=retention-redundancy] + [--retention-window=retention-window] + [--wal-depth=wal-depth] + [-i backup-id | --delete-expired | --merge-expired | --status=backup_status] + [--delete-wal] + [--dry-run] [--no-validate] [--no-sync] + [--help] + + pg_probackup merge -B backup-dir --instance=instance-name + -i backup-id [--progress] [-j num-threads] + [--no-validate] [--no-sync] + [--help] + + pg_probackup add-instance -B backup-dir -D pgdata-path + --instance=instance-name + [--external-dirs=external-directories-paths] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup del-instance -B backup-dir + --instance=instance-name + [--help] + + pg_probackup archive-push -B backup-dir --instance=instance-name + --wal-file-name=wal-file-name + [--wal-file-path=wal-file-path] + [-j num-threads] [--batch-size=batch_size] + [--archive-timeout=timeout] + [--no-ready-rename] [--no-sync] + [--overwrite] [--compress] + [--compress-algorithm=compress-algorithm] + [--compress-level=compress-level] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup archive-get -B backup-dir --instance=instance-name + --wal-file-path=wal-file-path + --wal-file-name=wal-file-name + [-j num-threads] [--batch-size=batch_size] + [--no-validate-wal] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--help] + + pg_probackup catchup -b catchup-mode + --source-pgdata=path_to_pgdata_on_remote_server + --destination-pgdata=path_to_local_dir + [--stream [-S slot-name] [--temp-slot | --perm-slot]] + [-j num-threads] + [-T OLDDIR=NEWDIR] + [--exclude-path=path_prefix] + [-d dbname] [-h host] [-p port] [-U username] + [-w --no-password] [-W --password] + [--remote-proto] [--remote-host] + [--remote-port] [--remote-path] [--remote-user] + [--ssh-options] + [--dry-run] + [--help] + +Подробнее читайте на сайте . +Сообщайте об ошибках в . 
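Across the renamed test modules in this patch, the recurring edit is mechanical: the module-level module_name constant and the per-test fname = self.id().split('.')[3] line are dropped in favor of self.module_name and self.fname, and the trailing self.del_test_dir(module_name, fname) cleanup calls go away with them. The patch itself does not show where those attributes come from; presumably they are provided by the shared ProbackupTest base class in tests/helpers/ptrack_helpers.py. The sketch below only illustrates how such attributes could be derived under that assumption; the class name, suffix handling, and tearDown hook are hypothetical, not the project's actual code.

import unittest


class ProbackupTestSketch(unittest.TestCase):
    """Hypothetical sketch only; not the real ProbackupTest implementation."""

    def setUp(self):
        # A unittest id looks like 'tests.delta_test.DeltaTest.test_delta_stream'.
        parts = self.id().split('.')
        module = parts[-3]
        # Strip the new '_test' suffix so 'delta_test' maps back to the old
        # module_name value ('delta') used for per-module test directories.
        self.module_name = module[:-5] if module.endswith('_test') else module
        # Same value the removed per-test lines computed explicitly.
        self.fname = parts[-1]

    def tearDown(self):
        # With cleanup centralized in the base class, the explicit
        # "# Clean after yourself" / del_test_dir() calls removed by this
        # patch are no longer needed in every test body.
        pass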
diff --git a/tests/expected/option_version.out b/tests/expected/option_version.out deleted file mode 100644 index 29cd93f45..000000000 --- a/tests/expected/option_version.out +++ /dev/null @@ -1 +0,0 @@ -pg_probackup 2.5.5 \ No newline at end of file diff --git a/tests/external.py b/tests/external_test.py similarity index 89% rename from tests/external.py rename to tests/external_test.py index 530e7fb26..53f3c5449 100644 --- a/tests/external.py +++ b/tests/external_test.py @@ -6,8 +6,6 @@ import shutil -module_name = 'external' - # TODO: add some ptrack tests class ExternalTest(ProbackupTest, unittest.TestCase): @@ -19,15 +17,14 @@ def test_basic_external(self): with external directory, restore backup, check that external directory was successfully copied """ - fname = self.id().split('.')[3] - core_dir = os.path.join(self.tmp_path, module_name, fname) + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') external_dir = self.get_tblspace_path(node, 'somedirectory') # create directory in external_directory @@ -91,9 +88,6 @@ def test_basic_external(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_external_none(self): @@ -103,13 +97,12 @@ def test_external_none(self): restore delta backup, check that external directory was not copied """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') external_dir = self.get_tblspace_path(node, 'somedirectory') # create directory in external_directory @@ -153,9 +146,6 @@ def test_external_none(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_external_dirs_overlapping(self): @@ -164,13 +154,12 @@ def test_external_dirs_overlapping(self): take backup with two external directories pointing to the same directory, backup should fail """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') external_dir1 = self.get_tblspace_path(node, 'external_dir1') external_dir2 = self.get_tblspace_path(node, 'external_dir2') @@ -207,9 +196,6 @@ def test_external_dirs_overlapping(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_external_dir_mapping(self): """ 
@@ -218,13 +204,12 @@ def test_external_dir_mapping(self): check that restore with external-dir mapping will end with success """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -247,7 +232,7 @@ def test_external_dir_mapping(self): data_dir=external_dir2, options=["-j", "4"]) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() external_dir1_new = self.get_tblspace_path(node_restored, 'external_dir1') @@ -300,20 +285,16 @@ def test_external_dir_mapping(self): node_restored.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_backup_multiple_external(self): """check that cmdline has priority over config""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -361,9 +342,6 @@ def test_backup_multiple_external(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_backward_compatibility(self): @@ -376,10 +354,9 @@ def test_external_backward_compatibility(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -447,7 +424,7 @@ def test_external_backward_compatibility(self): # RESTORE chain with new binary node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -466,9 +443,6 @@ def test_external_backward_compatibility(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_backward_compatibility_merge_1(self): @@ -480,10 +454,9 @@ def test_external_backward_compatibility_merge_1(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, 
fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -542,7 +515,7 @@ def test_external_backward_compatibility_merge_1(self): # Restore merged backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -561,9 +534,6 @@ def test_external_backward_compatibility_merge_1(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_backward_compatibility_merge_2(self): @@ -575,10 +545,9 @@ def test_external_backward_compatibility_merge_2(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -666,7 +635,7 @@ def test_external_backward_compatibility_merge_2(self): # Restore merged backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -689,9 +658,6 @@ def test_external_backward_compatibility_merge_2(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge(self): @@ -699,10 +665,9 @@ def test_external_merge(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -779,17 +744,13 @@ def test_external_merge(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge_skip_external_dirs(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -877,17 +838,13 @@ def test_external_merge_skip_external_dirs(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge_1(self): """""" - fname = 
self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -957,17 +914,13 @@ def test_external_merge_1(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge_3(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1050,17 +1003,13 @@ def test_external_merge_3(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_merge_2(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1144,17 +1093,13 @@ def test_external_merge_2(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_restore_external_changed_data(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1242,17 +1187,13 @@ def test_restore_external_changed_data(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_restore_external_changed_data_1(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1349,17 +1290,13 @@ def test_restore_external_changed_data_1(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_merge_external_changed_data(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1452,19 +1389,15 @@ def test_merge_external_changed_data(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_restore_skip_external(self): """ Check that --skip-external-dirs works correctly """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1523,9 +1456,6 @@ def test_restore_skip_external(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_dir_is_symlink(self): @@ -1535,14 +1465,13 @@ def test_external_dir_is_symlink(self): but restored as directory """ if os.name == 'nt': - return unittest.skip('Skipped for Windows') + self.skipTest('Skipped for Windows') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1557,7 +1486,7 @@ def test_external_dir_is_symlink(self): backup_dir, 'node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, module_name, fname) + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) symlinked_dir = os.path.join(core_dir, 'symlinked') self.restore_node( @@ -1581,7 +1510,7 @@ def test_external_dir_is_symlink(self): node.base_dir, exclude_dirs=['logs']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # RESTORE node_restored.cleanup() @@ -1606,9 +1535,6 @@ def test_external_dir_is_symlink(self): backup_dir, 'node', backup_id=backup_id)['external-dirs']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_dir_contain_symlink_on_dir(self): @@ -1618,14 +1544,13 @@ def test_external_dir_contain_symlink_on_dir(self): but restored as directory """ if os.name == 'nt': - return unittest.skip('Skipped for Windows') + self.skipTest('Skipped for Windows') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - 
base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1641,7 +1566,7 @@ def test_external_dir_contain_symlink_on_dir(self): backup_dir, 'node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, module_name, fname) + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) symlinked_dir = os.path.join(core_dir, 'symlinked') self.restore_node( @@ -1666,7 +1591,7 @@ def test_external_dir_contain_symlink_on_dir(self): node.base_dir, exclude_dirs=['logs']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # RESTORE node_restored.cleanup() @@ -1691,9 +1616,6 @@ def test_external_dir_contain_symlink_on_dir(self): backup_dir, 'node', backup_id=backup_id)['external-dirs']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_dir_contain_symlink_on_file(self): @@ -1703,14 +1625,13 @@ def test_external_dir_contain_symlink_on_file(self): but restored as directory """ if os.name == 'nt': - return unittest.skip('Skipped for Windows') + self.skipTest('Skipped for Windows') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1726,7 +1647,7 @@ def test_external_dir_contain_symlink_on_file(self): backup_dir, 'node', node, options=["-j", "4", "--stream"]) # fill some directory with data - core_dir = os.path.join(self.tmp_path, module_name, fname) + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) symlinked_dir = os.path.join(core_dir, 'symlinked') self.restore_node( @@ -1753,7 +1674,7 @@ def test_external_dir_contain_symlink_on_file(self): node.base_dir, exclude_dirs=['logs']) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # RESTORE node_restored.cleanup() @@ -1778,9 +1699,6 @@ def test_external_dir_contain_symlink_on_file(self): backup_dir, 'node', backup_id=backup_id)['external-dirs']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_external_dir_is_tablespace(self): @@ -1788,12 +1706,11 @@ def test_external_dir_is_tablespace(self): Check that backup fails with error if external directory points to tablespace """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1828,21 +1745,17 @@ def test_external_dir_is_tablespace(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_restore_external_dir_not_empty(self): """ Check that backup fails with error if external directory point to not empty tablespace and if remapped directory also isn`t empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1908,9 +1821,6 @@ def test_restore_external_dir_not_empty(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_restore_external_dir_is_missing(self): """ take FULL backup with not empty external directory @@ -1918,12 +1828,11 @@ def test_restore_external_dir_is_missing(self): take DELTA backup with external directory, which should fail """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1990,9 +1899,6 @@ def test_restore_external_dir_is_missing(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_external_dir_is_missing(self): """ take FULL backup with not empty external directory @@ -2003,12 +1909,11 @@ def test_merge_external_dir_is_missing(self): merge it into FULL, restore and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2078,9 +1983,6 @@ def test_merge_external_dir_is_missing(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_restore_external_dir_is_empty(self): """ take FULL backup with not empty external directory @@ -2089,12 +1991,11 @@ def test_restore_external_dir_is_empty(self): restore DELRA backup, check 
that restored external directory is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2142,9 +2043,6 @@ def test_restore_external_dir_is_empty(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_external_dir_is_empty(self): """ take FULL backup with not empty external directory @@ -2153,12 +2051,11 @@ def test_merge_external_dir_is_empty(self): merge backups and restore FULL, check that restored external directory is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2209,9 +2106,6 @@ def test_merge_external_dir_is_empty(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_restore_external_dir_string_order(self): """ take FULL backup with not empty external directory @@ -2220,12 +2114,11 @@ def test_restore_external_dir_string_order(self): restore DELRA backup, check that restored external directory is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2289,9 +2182,6 @@ def test_restore_external_dir_string_order(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_external_dir_string_order(self): """ take FULL backup with not empty external directory @@ -2300,12 +2190,11 @@ def test_merge_external_dir_string_order(self): restore DELRA backup, check that restored external directory is empty """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - core_dir = os.path.join(self.tmp_path, module_name, fname) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + core_dir = os.path.join(self.tmp_path, self.module_name, self.fname) shutil.rmtree(core_dir, ignore_errors=True) node = self.make_simple_node( - 
base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2372,9 +2261,6 @@ def test_merge_external_dir_string_order(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_smart_restore_externals(self): """ @@ -2383,13 +2269,12 @@ def test_smart_restore_externals(self): make sure that files from externals are not copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2451,9 +2336,6 @@ def test_smart_restore_externals(self): for file in filelist_diff: self.assertNotIn(file, logfile_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_external_validation(self): """ @@ -2462,13 +2344,12 @@ def test_external_validation(self): corrupt external file in backup, run validate which should fail """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2522,6 +2403,3 @@ def test_external_validation(self): 'CORRUPT', self.show_pb(backup_dir, 'node', full_id)['status'], 'Backup STATUS should be "CORRUPT"') - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/false_positive.py b/tests/false_positive_test.py similarity index 83% rename from tests/false_positive.py rename to tests/false_positive_test.py index a101f8107..ea82cb18f 100644 --- a/tests/false_positive.py +++ b/tests/false_positive_test.py @@ -1,13 +1,12 @@ import unittest import os +from time import sleep + from .helpers.ptrack_helpers import ProbackupTest, ProbackupException from datetime import datetime, timedelta import subprocess -module_name = 'false_positive' - - class FalsePositive(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -16,13 +15,12 @@ def test_validate_wal_lost_segment(self): """ Loose segment located between backups. ExpectedFailure. 
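The repeated removal of `fname = self.id().split('.')[3]` in these hunks works because the base class now derives both the module and test names from the unittest test id (see the ptrack_helpers.py hunk further down). A minimal sketch of that naming scheme; the concrete test id and class name below are illustrative, not taken from this patch:

```python
# Sketch of the naming now centralized in ProbackupTest.__init__ (ptrack_helpers.py):
# self.module_name = self.id().split('.')[1]
# self.fname       = self.id().split('.')[3]
test_id = 'tests.external_test.ExternalTest.test_restore_external_dir_not_empty'  # illustrative

parts = test_id.split('.')
module_name = parts[1]   # 'external_test'  -> self.module_name
fname = parts[3]         # 'test_restore_external_dir_not_empty' -> self.fname

# Each test keeps its data under <tmp_path>/<module_name>/<fname>/..., so the
# per-test boilerplate for backup_dir/base_dir paths is no longer needed.
print(module_name, fname)
```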
This is BUG """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -47,19 +45,15 @@ def test_validate_wal_lost_segment(self): backup_dir, 'node')) ######## - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.expectedFailure # Need to force validation of ancestor-chain def test_incremental_backup_corrupt_full_1(self): """page-level backup with corrupted full backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -104,9 +98,6 @@ def test_incremental_backup_corrupt_full_1(self): self.assertEqual( self.show_pb(backup_dir, 'node')[0]['Status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") @unittest.expectedFailure def test_pg_10_waldir(self): @@ -114,20 +105,20 @@ def test_pg_10_waldir(self): test group access for PG >= 11 """ if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') + self.skipTest('You need PostgreSQL >= 10 for this test') - fname = self.id().split('.')[3] wal_dir = os.path.join( - os.path.join(self.tmp_path, module_name, fname), 'wal_dir') + os.path.join(self.tmp_path, self.module_name, self.fname), 'wal_dir') + import shutil shutil.rmtree(wal_dir, ignore_errors=True) node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=[ '--data-checksums', '--waldir={0}'.format(wal_dir)]) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -140,7 +131,7 @@ def test_pg_10_waldir(self): # restore backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -154,9 +145,6 @@ def test_pg_10_waldir(self): os.path.islink(os.path.join(node_restored.data_dir, 'pg_wal')), 'pg_wal should be symlink') - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.expectedFailure # @unittest.skip("skip") def test_recovery_target_time_backup_victim(self): @@ -165,10 +153,9 @@ def test_recovery_target_time_backup_victim(self): probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, 
self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -198,6 +185,7 @@ def test_recovery_target_time_backup_victim(self): gdb = self.backup_node(backup_dir, 'node', node, gdb=True) + # Attention! This breakpoint is set to a probackup internal fuction, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb.remove_all_breakpoints() @@ -215,21 +203,20 @@ def test_recovery_target_time_backup_victim(self): backup_dir, 'node', options=['--recovery-target-time={0}'.format(target_time)]) - # Clean after yourself - self.del_test_dir(module_name, fname) - - @unittest.expectedFailure + # @unittest.expectedFailure # @unittest.skip("skip") def test_recovery_target_lsn_backup_victim(self): """ Check that for validation to recovery target probackup chooses valid backup https://github.com/postgrespro/pg_probackup/issues/104 + + @y.sokolov: looks like this test should pass. + So I commented 'expectedFailure' """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -257,6 +244,7 @@ def test_recovery_target_lsn_backup_victim(self): backup_dir, 'node', node, options=['--log-level-console=LOG'], gdb=True) + # Attention! This breakpoint is set to a probackup internal fuction, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb.remove_all_breakpoints() @@ -278,9 +266,6 @@ def test_recovery_target_lsn_backup_victim(self): backup_dir, 'node', options=['--recovery-target-lsn={0}'.format(target_lsn)]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") @unittest.expectedFailure def test_streaming_timeout(self): @@ -289,10 +274,9 @@ def test_streaming_timeout(self): message because our WAL streaming engine is "borrowed" from pg_receivexlog """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -308,6 +292,7 @@ def test_streaming_timeout(self): backup_dir, 'node', node, gdb=True, options=['--stream', '--log-level-file=LOG']) + # Attention! 
This breakpoint is set to a probackup internal fuction, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() @@ -328,20 +313,16 @@ def test_streaming_timeout(self): 'ERROR: Problem in receivexlog', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") @unittest.expectedFailure def test_validate_all_empty_catalog(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) try: @@ -357,6 +338,3 @@ def test_validate_all_empty_catalog(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py index ac64c4230..2e5ed40e8 100644 --- a/tests/helpers/__init__.py +++ b/tests/helpers/__init__.py @@ -1,2 +1,9 @@ -__all__ = ['ptrack_helpers', 'cfs_helpers', 'expected_errors'] -#from . import * \ No newline at end of file +__all__ = ['ptrack_helpers', 'cfs_helpers', 'data_helpers'] + +import unittest + +# python 2.7 compatibility +if not hasattr(unittest.TestCase, "skipTest"): + def skipTest(self, reason): + raise unittest.SkipTest(reason) + unittest.TestCase.skipTest = skipTest \ No newline at end of file diff --git a/tests/helpers/data_helpers.py b/tests/helpers/data_helpers.py new file mode 100644 index 000000000..27cb66c3d --- /dev/null +++ b/tests/helpers/data_helpers.py @@ -0,0 +1,78 @@ +import re +import unittest +import functools +import time + +def _tail_file(file, linetimeout, totaltimeout): + start = time.time() + with open(file, 'r') as f: + waits = 0 + while waits < linetimeout: + line = f.readline() + if line == '': + waits += 1 + time.sleep(1) + continue + waits = 0 + yield line + if time.time() - start > totaltimeout: + raise TimeoutError("total timeout tailing %s" % (file,)) + else: + raise TimeoutError("line timeout tailing %s" % (file,)) + + +class tail_file(object): # snake case to immitate function + def __init__(self, filename, *, linetimeout=10, totaltimeout=60, collect=False): + self.filename = filename + self.tailer = _tail_file(filename, linetimeout, totaltimeout) + self.collect = collect + self.lines = [] + self._content = None + + def __iter__(self): + return self + + def __next__(self): + line = next(self.tailer) + if self.collect: + self.lines.append(line) + self._content = None + return line + + @property + def content(self): + if not self.collect: + raise AttributeError("content collection is not enabled", + name="content", obj=self) + if not self._content: + self._content = "".join(self.lines) + return self._content + + def drop_content(self): + self.lines.clear() + self._content = None + + def stop_collect(self): + self.drop_content() + self.collect = False + + def wait(self, *, contains:str = None, regex:str = None): + assert contains != None or regex != None + assert contains == None or regex == None + try: + for line in self: + if contains is not None and contains in line: + break + if regex is not None and re.search(regex, line): + break + except TimeoutError: + msg = "Didn't found expected " + if contains is not None: + msg += repr(contains) + elif regex is not None: + 
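The "Attention!" comments added above flag that `pg_stop_backup` is a pg_probackup-internal function here, not the PostgreSQL core one; these tests use it to park a backup right before it finishes. A condensed sketch of the gdb-driven flow as it appears in the hunks above (method names are from the diff; backup_dir/node setup is elided):

```python
# Sketch of the breakpoint-driven test flow used in false_positive_test.py;
# the surrounding node/backup_dir fixture is elided.
gdb = self.backup_node(backup_dir, 'node', node, gdb=True,
                       options=['--stream', '--log-level-file=LOG'])

# Break inside pg_probackup's own pg_stop_backup(), not the server function.
gdb.set_breakpoint('pg_stop_backup')
gdb.run_until_break()         # the backup is now suspended just before stop-backup
gdb.remove_all_breakpoints()  # release it once the test has made its checks
```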
msg += f"/{regex}/" + msg += f" in {self.filename}" + raise unittest.TestCase.failureException(msg) + + def wait_shutdown(self): + self.wait(contains='database system is shut down') diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 3b14b7170..27d982856 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -1,7 +1,9 @@ # you need os for unittest to work import os import gc +import unittest from sys import exit, argv, version_info +import signal import subprocess import shutil import six @@ -13,6 +15,7 @@ from time import sleep import re import json +import random idx_ptrack = { 't_heap': { @@ -87,25 +90,56 @@ def dir_files(base_dir): return out_list +def is_pgpro(): + # pg_config --help + cmd = [os.environ['PG_CONFIG'], '--help'] + + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + return b'postgrespro' in result.stdout + + def is_enterprise(): # pg_config --help - if os.name == 'posix': - cmd = [os.environ['PG_CONFIG'], '--help'] - - elif os.name == 'nt': - cmd = [[os.environ['PG_CONFIG']], ['--help']] - - p = subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - if b'postgrespro.ru' in p.communicate()[0]: - return True - else: + cmd = [os.environ['PG_CONFIG'], '--help'] + + p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + # PostgresPro std or ent + if b'postgrespro' in p.stdout: + cmd = [os.environ['PG_CONFIG'], '--pgpro-edition'] + p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + + return b'enterprise' in p.stdout + else: # PostgreSQL return False +def is_nls_enabled(): + cmd = [os.environ['PG_CONFIG'], '--configure'] + + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) + return b'enable-nls' in result.stdout + + +def base36enc(number): + """Converts an integer to a base36 string.""" + alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + base36 = '' + sign = '' + + if number < 0: + sign = '-' + number = -number + + if 0 <= number < len(alphabet): + return sign + alphabet[number] + + while number != 0: + number, i = divmod(number, len(alphabet)) + base36 = alphabet[i] + base36 + + return sign + base36 + + class ProbackupException(Exception): def __init__(self, message, cmd): self.message = message @@ -114,46 +148,105 @@ def __init__(self, message, cmd): def __str__(self): return '\n ERROR: {0}\n CMD: {1}'.format(repr(self.message), self.cmd) +class PostgresNodeExtended(testgres.PostgresNode): -def slow_start(self, replica=False): + def __init__(self, base_dir=None, *args, **kwargs): + super(PostgresNodeExtended, self).__init__(name='test', base_dir=base_dir, *args, **kwargs) + self.is_started = False - # wait for https://github.com/postgrespro/testgres/pull/50 -# self.start() -# self.poll_query_until( -# "postgres", -# "SELECT not pg_is_in_recovery()", -# suppress={testgres.NodeConnection}) - if replica: - query = 'SELECT pg_is_in_recovery()' - else: - query = 'SELECT not pg_is_in_recovery()' + def slow_start(self, replica=False): - self.start() - while True: - try: - output = self.safe_psql('template1', query).decode("utf-8").rstrip() + # wait for https://github.com/postgrespro/testgres/pull/50 + # self.start() + # self.poll_query_until( + # "postgres", + # "SELECT not pg_is_in_recovery()", + # suppress={testgres.NodeConnection}) + if replica: + query = 'SELECT pg_is_in_recovery()' + else: + query = 'SELECT not pg_is_in_recovery()' + + 
self.start() + while True: + try: + output = self.safe_psql('template1', query).decode("utf-8").rstrip() + + if output == 't': + break + + except testgres.QueryException as e: + if 'database system is starting up' in e.message: + pass + elif 'FATAL: the database system is not accepting connections' in e.message: + pass + elif replica and 'Hot standby mode is disabled' in e.message: + raise e + else: + raise e + + sleep(0.5) + + def start(self, *args, **kwargs): + if not self.is_started: + super(PostgresNodeExtended, self).start(*args, **kwargs) + self.is_started = True + return self + + def stop(self, *args, **kwargs): + if self.is_started: + result = super(PostgresNodeExtended, self).stop(*args, **kwargs) + self.is_started = False + return result + + def kill(self, someone = None): + if self.is_started: + sig = signal.SIGKILL if os.name != 'nt' else signal.SIGBREAK + if someone == None: + os.kill(self.pid, sig) + else: + os.kill(self.auxiliary_pids[someone][0], sig) + self.is_started = False + + def table_checksum(self, table, dbname="postgres"): + con = self.connect(dbname=dbname) + + curname = "cur_"+str(random.randint(0,2**48)) - if output == 't': + con.execute(""" + DECLARE %s NO SCROLL CURSOR FOR + SELECT t::text FROM %s as t + """ % (curname, table)) + + sum = hashlib.md5() + while True: + rows = con.execute("FETCH FORWARD 5000 FROM %s" % curname) + if not rows: break + for row in rows: + # hash uses SipHash since Python3.4, therefore it is good enough + sum.update(row[0].encode('utf8')) - except testgres.QueryException as e: - if 'database system is starting up' in e.message: - pass - elif 'FATAL: the database system is not accepting connections' in e.message: - pass - elif replica and 'Hot standby mode is disabled' in e.message: - raise e - else: - raise e + con.execute("CLOSE %s; ROLLBACK;" % curname) - sleep(0.5) + con.close() + return sum.hexdigest() class ProbackupTest(object): # Class attributes enterprise = is_enterprise() + enable_nls = is_nls_enabled() + pgpro = is_pgpro() def __init__(self, *args, **kwargs): super(ProbackupTest, self).__init__(*args, **kwargs) + + self.nodes_to_cleanup = [] + + if isinstance(self, unittest.TestCase): + self.module_name = self.id().split('.')[1] + self.fname = self.id().split('.')[3] + if '-v' in argv or '--verbose' in argv: self.verbose = True else: @@ -184,8 +277,8 @@ def __init__(self, *args, **kwargs): self.test_env['LC_MESSAGES'] = 'C' self.test_env['LC_TIME'] = 'C' - self.gdb = 'PGPROBACKUP_GDB' in os.environ and \ - os.environ['PGPROBACKUP_GDB'] == 'ON' + self.gdb = 'PGPROBACKUP_GDB' in self.test_env and \ + self.test_env['PGPROBACKUP_GDB'] == 'ON' self.paranoia = 'PG_PROBACKUP_PARANOIA' in self.test_env and \ self.test_env['PG_PROBACKUP_PARANOIA'] == 'ON' @@ -217,10 +310,7 @@ def __init__(self, *args, **kwargs): self.user = self.get_username() self.probackup_path = None if 'PGPROBACKUPBIN' in self.test_env: - if ( - os.path.isfile(self.test_env["PGPROBACKUPBIN"]) and - os.access(self.test_env["PGPROBACKUPBIN"], os.X_OK) - ): + if shutil.which(self.test_env["PGPROBACKUPBIN"]): self.probackup_path = self.test_env["PGPROBACKUPBIN"] else: if self.verbose: @@ -317,6 +407,49 @@ def __init__(self, *args, **kwargs): os.environ["PGAPPNAME"] = "pg_probackup" + def is_test_result_ok(test_case): + # sources of solution: + # 1. python versions 2.7 - 3.10, verified on 3.10, 3.7, 2.7, taken from: + # https://tousu.in/qa/?qa=555402/unit-testing-getting-pythons-unittest-results-in-a-teardown-method&show=555403#a555403 + # + # 2. 
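`PostgresNodeExtended.table_checksum()` above streams a table through a server-side cursor in 5000-row batches and folds each row's text form into an MD5 digest, so large tables can be compared without pulling them into memory. An illustrative use, e.g. comparing a table on two nodes created with `make_simple_node()` (the node and table names are examples, not from this patch):

```python
# Illustrative usage of the new PostgresNodeExtended.table_checksum();
# 'master' and 'replica' stand for already-started test nodes.
master_sum = master.table_checksum('pgbench_accounts')
replica_sum = replica.table_checksum('pgbench_accounts', dbname='postgres')

assert master_sum == replica_sum, 'table contents diverged'
```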
python versions 3.11+ mixin, verified on 3.11, taken from: https://stackoverflow.com/a/39606065 + + if not isinstance(test_case, unittest.TestCase): + raise AssertionError("test_case is not instance of unittest.TestCase") + + if hasattr(test_case, '_outcome'): # Python 3.4+ + if hasattr(test_case._outcome, 'errors'): + # Python 3.4 - 3.10 (These two methods have no side effects) + result = test_case.defaultTestResult() # These two methods have no side effects + test_case._feedErrorsToResult(result, test_case._outcome.errors) + else: + # Python 3.11+ and pytest 5.3.5+ + result = test_case._outcome.result + if not hasattr(result, 'errors'): + result.errors = [] + if not hasattr(result, 'failures'): + result.failures = [] + else: # Python 2.7, 3.0-3.3 + result = getattr(test_case, '_outcomeForDoCleanups', test_case._resultForDoCleanups) + + ok = all(test != test_case for test, text in result.errors + result.failures) + + return ok + + def tearDown(self): + if self.is_test_result_ok(): + for node in self.nodes_to_cleanup: + node.cleanup() + self.del_test_dir(self.module_name, self.fname) + + else: + for node in self.nodes_to_cleanup: + # TODO make decorator with proper stop() vs cleanup() + node._try_shutdown(max_attempts=1) + # node.cleanup() + + self.nodes_to_cleanup.clear() + @property def pg_config_version(self): return self.version_to_num( @@ -347,10 +480,10 @@ def make_empty_node( shutil.rmtree(real_base_dir, ignore_errors=True) os.makedirs(real_base_dir) - node = testgres.get_new_node('test', base_dir=real_base_dir) - # bound method slow_start() to 'node' class instance - node.slow_start = slow_start.__get__(node) + node = PostgresNodeExtended(base_dir=real_base_dir) node.should_rm_dirs = True + self.nodes_to_cleanup.append(node) + return node def make_simple_node( @@ -413,6 +546,7 @@ def make_simple_node( if node.major_version >= 13: self.set_auto_conf( node, {}, 'postgresql.conf', ['wal_keep_segments']) + return node def simple_bootstrap(self, node, role) -> None: @@ -451,8 +585,8 @@ def simple_bootstrap(self, node, role) -> None: 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'postgres', 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' @@ -467,6 +601,22 @@ def simple_bootstrap(self, node, role) -> None: 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + # >= 15 + else: + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; 
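The new `tearDown()` above is what replaces all of the deleted "Clean after yourself" blocks: nodes registered in `self.nodes_to_cleanup` and the per-test directory are removed only when the test passed, otherwise the nodes are merely shut down so their data stays on disk for debugging. A trimmed sketch of the same idea for a plain unittest case; the outcome check below covers only the Python 3.4-3.10 branch, the real helper in ptrack_helpers.py also handles 2.7 and 3.11+:

```python
import shutil
import unittest

class CleanupOnSuccessCase(unittest.TestCase):
    """Minimal sketch of the pattern: clean up only if the test passed."""

    def setUp(self):
        self.paths_to_cleanup = []

    def _passed(self):
        # Simplified outcome check for Python 3.4-3.10 (same recipe the hunk cites);
        # not valid on 3.11+, where _outcome.result must be used instead.
        result = self.defaultTestResult()
        self._feedErrorsToResult(result, self._outcome.errors)
        return not result.errors and not result.failures

    def tearDown(self):
        if self._passed():
            for path in self.paths_to_cleanup:
                shutil.rmtree(path, ignore_errors=True)
        # on failure: keep everything on disk for post-mortem inspection
        self.paths_to_cleanup.clear()
```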
' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): res = node.execute( @@ -814,7 +964,7 @@ def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, retur if self.verbose: print(self.cmd) if gdb: - return GDBobj([binary_path] + command, self.verbose) + return GDBobj([binary_path] + command, self) if asynchronous: return subprocess.Popen( [binary_path] + command, @@ -836,7 +986,8 @@ def run_pb(self, command, asynchronous=False, gdb=False, old_binary=False, retur else: return self.output except subprocess.CalledProcessError as e: - raise ProbackupException(e.output.decode('utf-8'), self.cmd) + raise ProbackupException(e.output.decode('utf-8').replace("\r",""), + self.cmd) def run_binary(self, command, asynchronous=False, env=None): @@ -1547,7 +1698,7 @@ def version_to_num(self, version): parts.append('0') num = 0 for part in parts: - num = num * 100 + int(re.sub("[^\d]", "", part)) + num = num * 100 + int(re.sub(r"[^\d]", "", part)) return num def switch_wal_segment(self, node): @@ -1610,16 +1761,9 @@ def get_ptrack_version(self, node): def get_bin_path(self, binary): return testgres.get_bin_path(binary) - def clean_all(self): - for o in gc.get_referrers(testgres.PostgresNode): - if o.__class__ is testgres.PostgresNode: - o.cleanup() - def del_test_dir(self, module_name, fname): """ Del testdir and optimistically try to del module dir""" - self.clean_all() - shutil.rmtree( os.path.join( self.tmp_path, @@ -1643,7 +1787,7 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): 'ptrack_control', 'ptrack_init', 'pg_control', 'probackup_recovery.conf', 'recovery.signal', 'standby.signal', 'ptrack.map', 'ptrack.map.mmap', - 'ptrack.map.tmp' + 'ptrack.map.tmp', 'recovery.done','backup_label.old' ] if exclude_dirs: @@ -1666,54 +1810,52 @@ def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None): file_fullpath = os.path.join(root, file) file_relpath = os.path.relpath(file_fullpath, pgdata) - directory_dict['files'][file_relpath] = {'is_datafile': False} + cfile = ContentFile(file.isdigit()) + directory_dict['files'][file_relpath] = cfile with open(file_fullpath, 'rb') as f: - directory_dict['files'][file_relpath]['md5'] = hashlib.md5(f.read()).hexdigest() - f.close() -# directory_dict['files'][file_relpath]['md5'] = hashlib.md5( -# f = open(file_fullpath, 'rb').read()).hexdigest() + # truncate cfm's content's zero tail + if file_relpath.endswith('.cfm'): + content = f.read() + zero64 = b"\x00"*64 + l = len(content) + while l > 64: + s = (l - 1) & ~63 + if content[s:l] != zero64[:l-s]: + break + l = s + content = content[:l] + digest = hashlib.md5(content) + else: + digest = hashlib.md5() + while True: + b = f.read(64*1024) + if not b: break + digest.update(b) + cfile.md5 = digest.hexdigest() # crappy algorithm - if file.isdigit(): - directory_dict['files'][file_relpath]['is_datafile'] = True + if cfile.is_datafile: size_in_pages = os.path.getsize(file_fullpath)/8192 - directory_dict['files'][file_relpath][ - 'md5_per_page'] = self.get_md5_per_page_for_fork( + cfile.md5_per_page = self.get_md5_per_page_for_fork( file_fullpath, size_in_pages ) - for root, dirs, files in os.walk(pgdata, topdown=False, followlinks=True): for directory in dirs: directory_path = os.path.join(root, directory) directory_relpath = os.path.relpath(directory_path, pgdata) - - 
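For `.cfm` files (compression map files produced by the CFS feature), `pgdata_content()` above now strips trailing 64-byte blocks of zeros before hashing, so maps that differ only in unused zero padding compare as equal; all other files are hashed in 64 KiB chunks instead of being read whole. A standalone sketch of the truncation rule:

```python
import hashlib

def md5_without_zero_tail(content: bytes) -> str:
    """Sketch of the .cfm hashing rule from pgdata_content(): drop trailing
    zero bytes in 64-byte-aligned steps, keeping at least the first 64 bytes."""
    zero64 = b"\x00" * 64
    l = len(content)
    while l > 64:
        s = (l - 1) & ~63              # start of the last (possibly partial) 64-byte block
        if content[s:l] != zero64[:l - s]:
            break                      # the block holds real data - stop trimming
        l = s
    return hashlib.md5(content[:l]).hexdigest()

# Two maps that differ only in trailing zero padding hash identically:
a = b"\x01" * 64 + b"\x00" * 128
b = b"\x01" * 64
assert md5_without_zero_tail(a) == md5_without_zero_tail(b)
```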
found = False - for d in dirs_to_ignore: - if d in directory_relpath: - found = True - break - - # check if directory already here as part of larger directory - if not found: - for d in directory_dict['dirs']: - # print("OLD dir {0}".format(d)) - if directory_relpath in d: - found = True - break - - if not found: - directory_dict['dirs'][directory_relpath] = {} + parent = os.path.dirname(directory_relpath) + if parent in directory_dict['dirs']: + del directory_dict['dirs'][parent] + directory_dict['dirs'][directory_relpath] = ContentDir() # get permissions for every file and directory - for file in directory_dict['dirs']: - full_path = os.path.join(pgdata, file) - directory_dict['dirs'][file]['mode'] = os.stat( - full_path).st_mode + for dir, cdir in directory_dict['dirs'].items(): + full_path = os.path.join(pgdata, dir) + cdir.mode = os.stat(full_path).st_mode - for file in directory_dict['files']: + for file, cfile in directory_dict['files'].items(): full_path = os.path.join(pgdata, file) - directory_dict['files'][file]['mode'] = os.stat( - full_path).st_mode + cfile.mode = os.stat(full_path).st_mode return directory_dict @@ -1745,142 +1887,148 @@ def compare_pgdata(self, original_pgdata, restored_pgdata, exclusion_dict = dict error_message = 'Restored PGDATA is not equal to original!\n' # Compare directories - for directory in restored_pgdata['dirs']: - if directory not in original_pgdata['dirs']: - fail = True - error_message += '\nDirectory was not present' - error_message += ' in original PGDATA: {0}\n'.format( - os.path.join(restored_pgdata['pgdata'], directory)) - else: - if ( - restored_pgdata['dirs'][directory]['mode'] != - original_pgdata['dirs'][directory]['mode'] - ): - fail = True - error_message += '\nDir permissions mismatch:\n' - error_message += ' Dir old: {0} Permissions: {1}\n'.format( - os.path.join(original_pgdata['pgdata'], directory), - original_pgdata['dirs'][directory]['mode']) - error_message += ' Dir new: {0} Permissions: {1}\n'.format( - os.path.join(restored_pgdata['pgdata'], directory), - restored_pgdata['dirs'][directory]['mode']) - - for directory in original_pgdata['dirs']: - if directory not in restored_pgdata['dirs']: + restored_dirs = set(restored_pgdata['dirs']) + original_dirs = set(original_pgdata['dirs']) + + for directory in sorted(restored_dirs - original_dirs): + fail = True + error_message += '\nDirectory was not present' + error_message += ' in original PGDATA: {0}\n'.format( + os.path.join(restored_pgdata['pgdata'], directory)) + + for directory in sorted(original_dirs - restored_dirs): + fail = True + error_message += '\nDirectory dissappeared' + error_message += ' in restored PGDATA: {0}\n'.format( + os.path.join(restored_pgdata['pgdata'], directory)) + + for directory in sorted(original_dirs & restored_dirs): + original = original_pgdata['dirs'][directory] + restored = restored_pgdata['dirs'][directory] + if original.mode != restored.mode: fail = True - error_message += '\nDirectory dissappeared' - error_message += ' in restored PGDATA: {0}\n'.format( - os.path.join(restored_pgdata['pgdata'], directory)) - - for file in restored_pgdata['files']: + error_message += '\nDir permissions mismatch:\n' + error_message += ' Dir old: {0} Permissions: {1}\n'.format( + os.path.join(original_pgdata['pgdata'], directory), + original.mode) + error_message += ' Dir new: {0} Permissions: {1}\n'.format( + os.path.join(restored_pgdata['pgdata'], directory), + restored.mode) + + restored_files = set(restored_pgdata['files']) + original_files = 
set(original_pgdata['files']) + + for file in sorted(restored_files - original_files): # File is present in RESTORED PGDATA # but not present in ORIGINAL # only backup_label is allowed - if file not in original_pgdata['files']: - fail = True - error_message += '\nFile is not present' - error_message += ' in original PGDATA: {0}\n'.format( - os.path.join(restored_pgdata['pgdata'], file)) - - for file in original_pgdata['files']: - if file in restored_pgdata['files']: + fail = True + error_message += '\nFile is not present' + error_message += ' in original PGDATA: {0}\n'.format( + os.path.join(restored_pgdata['pgdata'], file)) + + for file in sorted(original_files - restored_files): + error_message += ( + '\nFile disappearance.\n ' + 'File: {0}\n').format( + os.path.join(restored_pgdata['pgdata'], file) + ) + fail = True - if ( - restored_pgdata['files'][file]['mode'] != - original_pgdata['files'][file]['mode'] - ): + for file in sorted(original_files & restored_files): + original = original_pgdata['files'][file] + restored = restored_pgdata['files'][file] + if restored.mode != original.mode: + fail = True + error_message += '\nFile permissions mismatch:\n' + error_message += ' File_old: {0} Permissions: {1:o}\n'.format( + os.path.join(original_pgdata['pgdata'], file), + original.mode) + error_message += ' File_new: {0} Permissions: {1:o}\n'.format( + os.path.join(restored_pgdata['pgdata'], file), + restored.mode) + + if original.md5 != restored.md5: + if file not in exclusion_dict: fail = True - error_message += '\nFile permissions mismatch:\n' - error_message += ' File_old: {0} Permissions: {1:o}\n'.format( + error_message += ( + '\nFile Checksum mismatch.\n' + 'File_old: {0}\nChecksum_old: {1}\n' + 'File_new: {2}\nChecksum_new: {3}\n').format( os.path.join(original_pgdata['pgdata'], file), - original_pgdata['files'][file]['mode']) - error_message += ' File_new: {0} Permissions: {1:o}\n'.format( + original.md5, os.path.join(restored_pgdata['pgdata'], file), - restored_pgdata['files'][file]['mode']) + restored.md5 + ) - if ( - original_pgdata['files'][file]['md5'] != - restored_pgdata['files'][file]['md5'] - ): - if file not in exclusion_dict: - fail = True - error_message += ( - '\nFile Checksum mismatch.\n' - 'File_old: {0}\nChecksum_old: {1}\n' - 'File_new: {2}\nChecksum_new: {3}\n').format( - os.path.join(original_pgdata['pgdata'], file), - original_pgdata['files'][file]['md5'], - os.path.join(restored_pgdata['pgdata'], file), - restored_pgdata['files'][file]['md5'] - ) + if not original.is_datafile: + continue - if original_pgdata['files'][file]['is_datafile']: - for page in original_pgdata['files'][file]['md5_per_page']: - if page not in restored_pgdata['files'][file]['md5_per_page']: - error_message += ( - '\n Page {0} dissappeared.\n ' - 'File: {1}\n').format( - page, - os.path.join( - restored_pgdata['pgdata'], - file - ) - ) - continue - - if not (file in exclusion_dict and page in exclusion_dict[file]): - if ( - original_pgdata['files'][file]['md5_per_page'][page] != - restored_pgdata['files'][file]['md5_per_page'][page] - ): - fail = True - error_message += ( - '\n Page checksum mismatch: {0}\n ' - ' PAGE Checksum_old: {1}\n ' - ' PAGE Checksum_new: {2}\n ' - ' File: {3}\n' - ).format( - page, - original_pgdata['files'][file][ - 'md5_per_page'][page], - restored_pgdata['files'][file][ - 'md5_per_page'][page], - os.path.join( - restored_pgdata['pgdata'], file) - ) - for page in restored_pgdata['files'][file]['md5_per_page']: - if page not in 
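`compare_pgdata()` above is rewritten around set algebra: missing, extra, and common entries come from the differences and the intersection of the two key sets, which flattens the old nested `if`/`for` structure and, thanks to `sorted()`, makes the error report deterministic. The same shape in isolation, with toy dictionaries standing in for the maps built by `pgdata_content()`:

```python
# Sketch of the comparison structure now used by compare_pgdata();
# the dicts stand for the {relpath: metadata} maps built by pgdata_content().
original = {'base/1/1259': 'md5-a', 'base/1/1249': 'md5-b'}
restored = {'base/1/1259': 'md5-a', 'base/1/9999': 'md5-c'}

original_files = set(original)
restored_files = set(restored)

for path in sorted(restored_files - original_files):
    print('not present in original PGDATA:', path)

for path in sorted(original_files - restored_files):
    print('disappeared in restored PGDATA:', path)

for path in sorted(original_files & restored_files):
    if original[path] != restored[path]:
        print('checksum mismatch:', path)
```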
original_pgdata['files'][file]['md5_per_page']: - error_message += '\n Extra page {0}\n File: {1}\n'.format( - page, - os.path.join( - restored_pgdata['pgdata'], file)) + original_pages = set(original.md5_per_page) + restored_pages = set(restored.md5_per_page) - else: - error_message += ( - '\nFile disappearance.\n ' - 'File: {0}\n').format( - os.path.join(restored_pgdata['pgdata'], file) + for page in sorted(original_pages - restored_pages): + error_message += '\n Page {0} dissappeared.\n File: {1}\n'.format( + page, + os.path.join(restored_pgdata['pgdata'], file) ) - fail = True + + + for page in sorted(restored_pages - original_pages): + error_message += '\n Extra page {0}\n File: {1}\n'.format( + page, + os.path.join(restored_pgdata['pgdata'], file)) + + for page in sorted(original_pages & restored_pages): + if file in exclusion_dict and page in exclusion_dict[file]: + continue + + if original.md5_per_page[page] != restored.md5_per_page[page]: + fail = True + error_message += ( + '\n Page checksum mismatch: {0}\n ' + ' PAGE Checksum_old: {1}\n ' + ' PAGE Checksum_new: {2}\n ' + ' File: {3}\n' + ).format( + page, + original.md5_per_page[page], + restored.md5_per_page[page], + os.path.join( + restored_pgdata['pgdata'], file) + ) + self.assertFalse(fail, error_message) def gdb_attach(self, pid): - return GDBobj([str(pid)], self.verbose, attach=True) + return GDBobj([str(pid)], self, attach=True) + + def _check_gdb_flag_or_skip_test(self): + if not self.gdb: + self.skipTest( + "Specify PGPROBACKUP_GDB and build without " + "optimizations for run this test" + ) class GdbException(Exception): - def __init__(self, message=False): + def __init__(self, message="False"): self.message = message def __str__(self): return '\n ERROR: {0}\n'.format(repr(self.message)) -class GDBobj(ProbackupTest): - def __init__(self, cmd, verbose, attach=False): - self.verbose = verbose +class GDBobj: + def __init__(self, cmd, env, attach=False): + self.verbose = env.verbose self.output = '' + # Check gdb flag is set up + if not env.gdb: + raise GdbException("No `PGPROBACKUP_GDB=on` is set, " + "test should call ProbackupTest::check_gdb_flag_or_skip_test() on its start " + "and be skipped") # Check gdb presense try: gdb_version, _ = subprocess.Popen( @@ -1903,7 +2051,7 @@ def __init__(self, cmd, verbose, attach=False): # Get version gdb_version_number = re.search( - b"^GNU gdb [^\d]*(\d+)\.(\d)", + br"^GNU gdb [^\d]*(\d+)\.(\d)", gdb_version) self.major_version = int(gdb_version_number.group(1)) self.minor_version = int(gdb_version_number.group(2)) @@ -1917,7 +2065,8 @@ def __init__(self, cmd, verbose, attach=False): stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0, - universal_newlines=True + text=True, + errors='replace', ) self.gdb_pid = self.proc.pid @@ -2101,3 +2250,10 @@ def _execute(self, cmd, running=True): # if running and line.startswith('*running'): break return output +class ContentFile(object): + __slots__ = ('is_datafile', 'mode', 'md5', 'md5_per_page') + def __init__(self, is_datafile: bool): + self.is_datafile = is_datafile + +class ContentDir(object): + __slots__ = ('mode') diff --git a/tests/incr_restore.py b/tests/incr_restore_test.py similarity index 78% rename from tests/incr_restore.py rename to tests/incr_restore_test.py index cb684a23a..6a2164098 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore_test.py @@ -9,23 +9,20 @@ import hashlib import shutil import json -from testgres import QueryException - - -module_name = 'incr_restore' - +from testgres import QueryException, 
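`GDBobj` now refuses to start unless gdb runs are enabled in the test environment, and tests get `_check_gdb_flag_or_skip_test()` to turn that situation into a skip rather than a failure. A self-contained sketch of the guard pattern; the environment-variable name and skip message mirror the hunk above, the test body is illustrative:

```python
import os
import unittest

class GdbGuardExample(unittest.TestCase):
    """Sketch of the skip-vs-fail behaviour around PGPROBACKUP_GDB."""

    @property
    def gdb_enabled(self):
        return os.environ.get('PGPROBACKUP_GDB') == 'ON'

    def _check_gdb_flag_or_skip_test(self):
        if not self.gdb_enabled:
            self.skipTest('Specify PGPROBACKUP_GDB and build without '
                          'optimizations for run this test')

    def test_breakpoint_driven_scenario(self):
        self._check_gdb_flag_or_skip_test()
        # ... the real tests would create a GDBobj-backed backup here ...
```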
StartNodeException +import stat +from stat import S_ISDIR class IncrRestoreTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_basic_incr_restore(self): """incremental restore in CHECKSUM mode""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -76,18 +73,14 @@ def test_basic_incr_restore(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_incr_restore_into_missing_directory(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -124,19 +117,15 @@ def test_basic_incr_restore_into_missing_directory(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_checksum_corruption_detection(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -181,20 +170,16 @@ def test_checksum_corruption_detection(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -224,19 +209,15 @@ def test_incr_restore_with_tablespace(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_1(self): """recovery to target timeline""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 
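`test_basic_incr_restore` above exercises "incremental restore in CHECKSUM mode"; in this suite that normally means passing an incremental-mode option to `restore_node`. The exact flags are not visible in this hunk, so the option string below is an assumption based on pg_probackup's documented `--incremental-mode` (`-I`) switch:

```python
# Assumed restore invocation for CHECKSUM-mode incremental restore; the
# '--incremental-mode' option comes from pg_probackup's documentation,
# not from this hunk, and the surrounding fixture is elided.
node.stop()
self.restore_node(
    backup_dir, 'node', node,
    options=['--incremental-mode=checksum'])
node.slow_start()
```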
'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -282,22 +263,18 @@ def test_incr_restore_with_tablespace_1(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_2(self): """ If "--tablespace-mapping" option is used with incremental restore, then new directory must be empty. """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -305,7 +282,7 @@ def test_incr_restore_with_tablespace_2(self): self.backup_node(backup_dir, 'node', node, options=['--stream']) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) # fill node1 with data out = self.restore_node( @@ -355,20 +332,16 @@ def test_incr_restore_with_tablespace_2(self): pgdata_restored = self.pgdata_content(node_1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_3(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -396,21 +369,17 @@ def test_incr_restore_with_tablespace_3(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_4(self): """ Check that system ID mismatch is detected, """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -427,7 +396,7 @@ def test_incr_restore_with_tablespace_4(self): # recreate node node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) node.slow_start() @@ -469,9 
+438,6 @@ def test_incr_restore_with_tablespace_4(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure @unittest.skip("skip") def test_incr_restore_with_tablespace_5(self): @@ -481,13 +447,12 @@ def test_incr_restore_with_tablespace_5(self): with some old content, that belongs to an instance with different system id. """ - fname = self.id().split('.')[3] node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) node1.slow_start() @@ -503,7 +468,7 @@ def test_incr_restore_with_tablespace_5(self): # recreate node node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2'), + base_dir=os.path.join(self.module_name, self.fname, 'node2'), set_replication=True, initdb_params=['--data-checksums']) node2.slow_start() @@ -530,21 +495,17 @@ def test_incr_restore_with_tablespace_5(self): pgdata_restored = self.pgdata_content(node1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_6(self): """ Empty pgdata, not empty tablespace """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -591,22 +552,18 @@ def test_incr_restore_with_tablespace_6(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_restore_with_tablespace_7(self): """ Restore backup without tablespace into PGDATA with tablespace. 
""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -647,19 +604,15 @@ def test_incr_restore_with_tablespace_7(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_incr_restore_sanity(self): """recovery to target timeline""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -690,7 +643,7 @@ def test_basic_incr_restore_sanity(self): repr(e.message), self.cmd)) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) try: self.restore_node( @@ -714,9 +667,6 @@ def test_basic_incr_restore_sanity(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_checksum_restore(self): """ @@ -725,13 +675,12 @@ def test_incr_checksum_restore(self): X - is instance, we want to return it to C state. """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -758,7 +707,7 @@ def test_incr_checksum_restore(self): node.stop(['-m', 'immediate', '-D', node.data_dir]) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) node_1.cleanup() self.restore_node( @@ -803,9 +752,6 @@ def test_incr_checksum_restore(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_lsn_restore(self): @@ -815,13 +761,12 @@ def test_incr_lsn_restore(self): X - is instance, we want to return it to C state. 
""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -848,7 +793,7 @@ def test_incr_lsn_restore(self): node.stop(['-m', 'immediate', '-D', node.data_dir]) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) node_1.cleanup() self.restore_node( @@ -892,9 +837,6 @@ def test_incr_lsn_restore(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_lsn_sanity(self): """ @@ -904,13 +846,12 @@ def test_incr_lsn_sanity(self): X - is instance, we want to return it to state B. fail is expected behaviour in case of lsn restore. """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -920,7 +861,7 @@ def test_incr_lsn_sanity(self): node.pgbench_init(scale=10) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) node_1.cleanup() self.restore_node( @@ -961,9 +902,6 @@ def test_incr_lsn_sanity(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_checksum_sanity(self): """ @@ -972,12 +910,11 @@ def test_incr_checksum_sanity(self): X - is instance, we want to return it to state B. 
""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -987,7 +924,7 @@ def test_incr_checksum_sanity(self): node.pgbench_init(scale=20) node_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_1')) node_1.cleanup() self.restore_node( @@ -1019,22 +956,17 @@ def test_incr_checksum_sanity(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") def test_incr_checksum_corruption_detection(self): """ check that corrupted page got detected and replaced """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), # initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1078,21 +1010,17 @@ def test_incr_checksum_corruption_detection(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_incr_lsn_corruption_detection(self): """ check that corrupted page got detected and replaced """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1136,20 +1064,16 @@ def test_incr_lsn_corruption_detection(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_restore_multiple_external(self): """check that cmdline has priority over config""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1207,20 +1131,16 @@ def test_incr_restore_multiple_external(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def 
test_incr_lsn_restore_multiple_external(self): """check that cmdline has priority over config""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1278,22 +1198,18 @@ def test_incr_lsn_restore_multiple_external(self): node.base_dir, exclude_dirs=['logs']) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_lsn_restore_backward(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on', 'hot_standby': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1386,23 +1302,19 @@ def test_incr_lsn_restore_backward(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(delta_pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_checksum_restore_backward(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'hot_standby': 'on'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1478,23 +1390,18 @@ def test_incr_checksum_restore_backward(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(delta_pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_make_replica_via_incr_checksum_restore(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1503,7 +1410,7 @@ def test_make_replica_via_incr_checksum_restore(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + 
base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() master.pgbench_init(scale=20) @@ -1551,23 +1458,18 @@ def test_make_replica_via_incr_checksum_restore(self): pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_make_replica_via_incr_lsn_restore(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1576,7 +1478,7 @@ def test_make_replica_via_incr_lsn_restore(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() master.pgbench_init(scale=20) @@ -1624,20 +1526,16 @@ def test_make_replica_via_incr_lsn_restore(self): pgbench = new_master.pgbench(options=['-T', '10', '-c', '1']) pgbench.wait() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_checksum_long_xact(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1694,9 +1592,6 @@ def test_incr_checksum_long_xact(self): 'select count(*) from t1').decode('utf-8').rstrip(), '1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure # This test will pass with Enterprise @@ -1705,12 +1600,11 @@ def test_incr_checksum_long_xact(self): def test_incr_lsn_long_xact_1(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1774,24 +1668,20 @@ def test_incr_lsn_long_xact_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_lsn_long_xact_2(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 
'full_page_writes': 'off', 'wal_log_hints': 'off'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1861,21 +1751,17 @@ def test_incr_lsn_long_xact_2(self): 'select count(*) from t1').decode('utf-8').rstrip(), '1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_restore_zero_size_file_checksum(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1934,21 +1820,17 @@ def test_incr_restore_zero_size_file_checksum(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata3, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_incr_restore_zero_size_file_lsn(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2013,15 +1895,11 @@ def test_incr_restore_zero_size_file_lsn(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata3, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_incremental_partial_restore_exclude_checksum(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2060,11 +1938,11 @@ def test_incremental_partial_restore_exclude_checksum(self): # restore FULL backup into second node2 node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1')) + base_dir=os.path.join(self.module_name, self.fname, 'node1')) node1.cleanup() node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() # restore some data into node2 @@ -2085,7 +1963,9 @@ def test_incremental_partial_restore_exclude_checksum(self): node2, options=[ "--db-exclude=db1", "--db-exclude=db5", - "-I", "checksum"]) + "-I", "checksum", + "--destroy-all-other-dbs", + ]) pgdata2 = self.pgdata_content(node2.data_dir) @@ -2118,15 +1998,11 @@ def test_incremental_partial_restore_exclude_checksum(self): self.assertNotIn('PANIC', output) - # Clean after yourself - 
self.del_test_dir(module_name, fname) - def test_incremental_partial_restore_exclude_lsn(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2167,11 +2043,11 @@ def test_incremental_partial_restore_exclude_lsn(self): # restore FULL backup into second node2 node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1')) + base_dir=os.path.join(self.module_name, self.fname, 'node1')) node1.cleanup() node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() # restore some data into node2 @@ -2195,7 +2071,9 @@ def test_incremental_partial_restore_exclude_lsn(self): node2, options=[ "--db-exclude=db1", "--db-exclude=db5", - "-I", "lsn"]) + "-I", "lsn", + "--destroy-all-other-dbs", + ]) pgdata2 = self.pgdata_content(node2.data_dir) @@ -2228,15 +2106,11 @@ def test_incremental_partial_restore_exclude_lsn(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_incremental_partial_restore_exclude_tablespace_checksum(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2282,13 +2156,13 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): # node1 node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1')) + base_dir=os.path.join(self.module_name, self.fname, 'node1')) node1.cleanup() node1_tablespace = self.get_tblspace_path(node1, 'somedata') # node2 node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() node2_tablespace = self.get_tblspace_path(node2, 'somedata') @@ -2319,7 +2193,8 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): "--db-exclude=db1", "--db-exclude=db5", "-T", "{0}={1}".format( - node_tablespace, node2_tablespace)]) + node_tablespace, node2_tablespace), + "--destroy-all-other-dbs"]) # we should die here because exception is what we expect to happen self.assertEqual( 1, 0, @@ -2340,7 +2215,9 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): "--db-exclude=db1", "--db-exclude=db5", "-T", "{0}={1}".format( - node_tablespace, node2_tablespace)]) + node_tablespace, node2_tablespace), + "--destroy-all-other-dbs", + ]) pgdata2 = self.pgdata_content(node2.data_dir) @@ -2372,17 +2249,134 @@ def test_incremental_partial_restore_exclude_tablespace_checksum(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) + def test_incremental_partial_restore_deny(self): + """ + Do not allow partial incremental restore into non-empty PGDATA + because we can't limit WAL replay to a single database.
+ """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + for i in range(1, 3): + node.safe_psql('postgres', f'CREATE database db{i}') + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + pgdata = self.pgdata_content(node.data_dir) + + try: + self.restore_node(backup_dir, 'node', node, options=["--db-include=db1", '-I', 'LSN']) + self.fail("incremental partial restore is not allowed") + except ProbackupException as e: + self.assertIn("Incremental restore is not allowed: Postmaster is running.", e.message) + + node.safe_psql('db2', 'create table x (id int)') + node.safe_psql('db2', 'insert into x values (42)') + + node.stop() + + try: + self.restore_node(backup_dir, 'node', node, options=["--db-include=db1", '-I', 'LSN']) + self.fail("because incremental partial restore is not allowed") + except ProbackupException as e: + self.assertIn("Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden", e.message) + + node.slow_start() + value = node.execute('db2', 'select * from x')[0][0] + self.assertEqual(42, value) + + def test_deny_incremental_partial_restore_exclude_tablespace_checksum(self): + """ + Do now allow partial incremental restore into non-empty PGDATA + becase we can't limit WAL replay to a single database. + (case of tablespaces) + """ + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + self.create_tblspace_in_node(node, 'somedata') + + node_tablespace = self.get_tblspace_path(node, 'somedata') + + tbl_oid = node.safe_psql( + 'postgres', + "SELECT oid " + "FROM pg_tablespace " + "WHERE spcname = 'somedata'").rstrip() + + for i in range(1, 10, 1): + node.safe_psql( + 'postgres', + 'CREATE database db{0} tablespace somedata'.format(i)) + + db_list_raw = node.safe_psql( + 'postgres', + 'SELECT to_json(a) ' + 'FROM (SELECT oid, datname FROM pg_database) a').rstrip() + + db_list_splitted = db_list_raw.splitlines() + + db_list = {} + for line in db_list_splitted: + line = json.loads(line) + db_list[line['datname']] = line['oid'] + + # FULL backup + backup_id = self.backup_node(backup_dir, 'node', node) + + # node2 + node2 = self.make_simple_node('node2') + node2.cleanup() + node2_tablespace = self.get_tblspace_path(node2, 'somedata') + + # in node2 restore full backup + self.restore_node( + backup_dir, 'node', + node2, options=[ + "-T", f"{node_tablespace}={node2_tablespace}"]) + + # partial incremental restore into node2 + try: + self.restore_node(backup_dir, 'node', node2, + options=["-I", "checksum", + "--db-exclude=db1", + "--db-exclude=db5", + "-T", f"{node_tablespace}={node2_tablespace}"]) + self.fail("remapped tablespace contain old data") + except ProbackupException as e: + pass + + try: + self.restore_node(backup_dir, 'node', node2, + options=[ + "-I", "checksum", "--force", + "--db-exclude=db1", "--db-exclude=db5", + "-T", f"{node_tablespace}={node2_tablespace}"]) + self.fail("incremental partial restore 
is not allowed") + except ProbackupException as e: + self.assertIn("Incremental restore is not allowed: Partial incremental restore into non-empty PGDATA is forbidden", e.message) def test_incremental_pg_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2391,7 +2385,7 @@ def test_incremental_pg_filenode_map(self): node.slow_start() node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), initdb_params=['--data-checksums']) node1.cleanup() @@ -2432,7 +2426,207 @@ def test_incremental_pg_filenode_map(self): 'postgres', 'select 1') - # Clean after yourself - self.del_test_dir(module_name, fname) - # check that MinRecPoint and BackupStartLsn are correctly used in case of --incrementa-lsn + + # @unittest.skip("skip") + def test_incr_restore_issue_313(self): + """ + Check that failed incremental restore can be restarted + """ + self._check_gdb_flag_or_skip_test + node = self.make_simple_node('node', + set_replication=True, + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale = 50) + + full_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='full') + + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '1', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + last_backup_id = self.backup_node(backup_dir, 'node', node, backup_type='delta') + + pgdata = self.pgdata_content(node.data_dir) + node.cleanup() + + self.restore_node(backup_dir, 'node', node, backup_id=full_backup_id) + + count = 0 + filelist = self.get_backup_filelist(backup_dir, 'node', last_backup_id) + for file in filelist: + # count only nondata files + if int(filelist[file]['is_datafile']) == 0 and \ + not stat.S_ISDIR(int(filelist[file]['mode'])) and \ + not filelist[file]['size'] == '0' and \ + file != 'database_map': + count += 1 + + gdb = self.restore_node(backup_dir, 'node', node, gdb=True, + backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum']) + gdb.verbose = False + gdb.set_breakpoint('restore_non_data_file') + gdb.run_until_break() + gdb.continue_execution_until_break(count - 1) + gdb.quit() + + bak_file = os.path.join(node.data_dir, 'global', 'pg_control.pbk.bak') + self.assertTrue( + os.path.exists(bak_file), + "pg_control bak File should not exist: {0}".format(bak_file)) + + try: + node.slow_start() + # we should die here because exception is what we expect to happen + self.assertEqual( + 1, 0, + "Expecting Error because backup is not fully restored") + except StartNodeException as e: + self.assertIn( + 'Cannot start node', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) + + with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: + if self.pg_config_version >= 120000: + self.assertIn( + "PANIC: could not 
read file \"global/pg_control\"", + f.read()) + else: + self.assertIn( + "PANIC: could not read from control file", + f.read()) + self.restore_node(backup_dir, 'node', node, + backup_id=last_backup_id, options=['--progress', '--incremental-mode=checksum']) + node.slow_start() + self.compare_pgdata(pgdata, self.pgdata_content(node.data_dir)) + + # @unittest.skip("skip") + def test_skip_pages_at_non_zero_segment_checksum(self): + if self.remote: + self.skipTest("Skipped because this test doesn't work properly in remote mode yet") + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # create table of size > 1 GB, so it will have several segments + node.safe_psql( + 'postgres', + "create table t as select i as a, i*2 as b, i*3 as c, i*4 as d, i*5 as e " + "from generate_series(1,20600000) i; " + "CHECKPOINT ") + + filepath = node.safe_psql( + 'postgres', + "SELECT pg_relation_filepath('t')" + ).decode('utf-8').rstrip() + + # segment .1 must exist in order to proceed this test + self.assertTrue(os.path.exists(f'{os.path.join(node.data_dir, filepath)}.1')) + + # do full backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 101; " + "CHECKPOINT") + + # do incremental backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 201; " + "CHECKPOINT") + + node.stop() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=checksum", "--log-level-console=INFO"]) + + self.assertNotIn('WARNING: Corruption detected in file', self.output, + 'Incremental restore copied pages from .1 datafile segment that were not changed') + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) + + # @unittest.skip("skip") + def test_skip_pages_at_non_zero_segment_lsn(self): + if self.remote: + self.skipTest("Skipped because this test doesn't work properly in remote mode yet") + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums'], + pg_options={'wal_log_hints': 'on'}) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # create table of size > 1 GB, so it will have several segments + node.safe_psql( + 'postgres', + "create table t as select i as a, i*2 as b, i*3 as c, i*4 as d, i*5 as e " + "from generate_series(1,20600000) i; " + "CHECKPOINT ") + + filepath = node.safe_psql( + 'postgres', + "SELECT pg_relation_filepath('t')" + ).decode('utf-8').rstrip() + + # segment .1 must exist in order to proceed this test + self.assertTrue(os.path.exists(f'{os.path.join(node.data_dir, filepath)}.1')) + + # do full backup + self.backup_node(backup_dir, 'node', node) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 101; " + "CHECKPOINT") + + # do incremental backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + pgdata = 
self.pgdata_content(node.data_dir) + + node.safe_psql( + 'postgres', + "DELETE FROM t WHERE a < 201; " + "CHECKPOINT") + + node.stop() + + self.restore_node( + backup_dir, 'node', node, options=["-j", "4", "--incremental-mode=lsn", "--log-level-console=INFO"]) + + self.assertNotIn('WARNING: Corruption detected in file', self.output, + 'Incremental restore copied pages from .1 datafile segment that were not changed') + + pgdata_restored = self.pgdata_content(node.data_dir) + self.compare_pgdata(pgdata, pgdata_restored) diff --git a/tests/init.py b/tests/init_test.py similarity index 81% rename from tests/init.py rename to tests/init_test.py index f5715d249..4e000c78f 100644 --- a/tests/init.py +++ b/tests/init_test.py @@ -4,18 +4,14 @@ import shutil -module_name = 'init' - - class InitTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_success(self): """Success normal init""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.assertEqual( dir_files(backup_dir), @@ -60,19 +56,16 @@ def test_success(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "ERROR: Required parameter not specified: PGDATA (-D, --pgdata)", + "ERROR: No postgres data directory specified.\n" + "Please specify it either using environment variable PGDATA or\ncommand line option --pgdata (-D)", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(e.message, self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_already_exist(self): """Failure with backup catalog already existed""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) try: self.show_pb(backup_dir, 'node') @@ -84,15 +77,11 @@ def test_already_exist(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_abs_path(self): """failure with backup catalog should be given as absolute path""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) try: self.run_pb(["init", "-B", os.path.relpath("%s/backup" % node.base_dir, self.dir_path)]) self.assertEqual(1, 0, 'Expecting Error due to initialization with non-absolute path in --backup-path. 
Output: {0} \n CMD: {1}'.format( @@ -103,18 +92,14 @@ def test_abs_path(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_add_instance_idempotence(self): """ https://github.com/postgrespro/pg_probackup/issues/219 """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -152,6 +137,3 @@ def test_add_instance_idempotence(self): e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/locking.py b/tests/locking_test.py similarity index 88% rename from tests/locking.py rename to tests/locking_test.py index ef7aa1f25..5367c2610 100644 --- a/tests/locking.py +++ b/tests/locking_test.py @@ -4,9 +4,6 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'locking' - - class LockingTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -17,12 +14,13 @@ def test_locking_running_validate_1(self): run validate, expect it to successfully executed, concurrent RUNNING backup with pid file and active process is legal """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -62,7 +60,6 @@ def test_locking_running_validate_1(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_running_validate_2(self): """ @@ -72,12 +69,13 @@ def test_locking_running_validate_2(self): RUNNING backup with pid file AND without active pid is legal, but his status must be changed to ERROR and pid file is deleted """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -131,7 +129,6 @@ def test_locking_running_validate_2(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_running_validate_2_specific_id(self): """ @@ -142,12 +139,13 @@ def test_locking_running_validate_2_specific_id(self): RUNNING backup with pid file AND without active pid is legal, but his status must be changed to ERROR and pid file is deleted """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + 
node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -230,7 +228,6 @@ def test_locking_running_validate_2_specific_id(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_running_3(self): """ @@ -240,12 +237,13 @@ def test_locking_running_3(self): RUNNING backup without pid file AND without active pid is legal, his status must be changed to ERROR """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -300,7 +298,6 @@ def test_locking_running_3(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_restore_locked(self): """ @@ -310,12 +307,13 @@ def test_locking_restore_locked(self): Expect restore to sucseed because read-only locks do not conflict """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -342,7 +340,6 @@ def test_locking_restore_locked(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_concurrent_delete_and_restore(self): """ @@ -352,12 +349,13 @@ def test_concurrent_delete_and_restore(self): Expect restore to fail because validation of intermediate backup is impossible """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -399,7 +397,6 @@ def test_concurrent_delete_and_restore(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_concurrent_validate_and_backup(self): """ @@ -407,12 +404,13 @@ def test_locking_concurrent_validate_and_backup(self): and stop it in the middle, take page backup. 
Expect PAGE backup to be successfully executed """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -435,7 +433,6 @@ def test_locking_concurrent_validate_and_backup(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_locking_concurren_restore_and_delete(self): """ @@ -443,12 +440,13 @@ def test_locking_concurren_restore_and_delete(self): and stop it in the middle, delete full backup. Expect it to fail. """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -479,17 +477,15 @@ def test_locking_concurren_restore_and_delete(self): # Clean after yourself gdb.kill() - self.del_test_dir(module_name, fname) def test_backup_directory_name(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -533,18 +529,16 @@ def test_backup_directory_name(self): self.show_pb(backup_dir, 'node', page_id_2)) # Clean after yourself - self.del_test_dir(module_name, fname) def test_empty_lock_file(self): """ https://github.com/postgrespro/pg_probackup/issues/308 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -578,19 +572,17 @@ def test_empty_lock_file(self): # p1.wait() # p2.wait() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_shared_lock(self): """ Make sure that shared lock leaves no files with pids """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) 
self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -635,5 +627,3 @@ def test_shared_lock(self): os.path.exists(lockfile_shr), "File should not exist: {0}".format(lockfile_shr)) - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/logging.py b/tests/logging_test.py similarity index 85% rename from tests/logging.py rename to tests/logging_test.py index 47143cfb7..c5cdfa344 100644 --- a/tests/logging.py +++ b/tests/logging_test.py @@ -3,22 +3,22 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import datetime -module_name = 'logging' - - class LogTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure # PGPRO-2154 def test_log_rotation(self): - fname = self.id().split('.')[3] + """ + """ + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -39,17 +39,13 @@ def test_log_rotation(self): gdb.run_until_break() gdb.continue_execution_until_exit() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_log_filename_strftime(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -72,17 +68,13 @@ def test_log_filename_strftime(self): self.assertTrue(os.path.isfile(path)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_truncate_rotation_file(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -147,17 +139,13 @@ def test_truncate_rotation_file(self): self.assertTrue(os.path.isfile(rotation_file_path)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_unlink_rotation_file(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -219,17 +207,13 @@ def test_unlink_rotation_file(self): os.stat(log_file_path).st_size, log_file_size) - # Clean after yourself - self.del_test_dir(module_name, 
fname) - def test_garbage_in_rotation_file(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -257,9 +241,6 @@ def test_garbage_in_rotation_file(self): # mangle .rotation file with open(rotation_file_path, "w+b", 0) as f: f.write(b"blah") - f.flush() - f.close - output = self.backup_node( backup_dir, 'node', node, options=[ @@ -298,24 +279,20 @@ def test_garbage_in_rotation_file(self): os.stat(log_file_path).st_size, log_file_size) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_issue_274(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'node', node, options=['--stream']) @@ -366,6 +343,3 @@ def test_issue_274(self): log_content = f.read() self.assertIn('INFO: command:', log_content) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/merge.py b/tests/merge_test.py similarity index 88% rename from tests/merge.py rename to tests/merge_test.py index fe0927f49..1d40af7f7 100644 --- a/tests/merge.py +++ b/tests/merge_test.py @@ -9,21 +9,17 @@ import time import subprocess -module_name = "merge" - - class MergeTest(ProbackupTest, unittest.TestCase): def test_basic_merge_full_page(self): """ Test MERGE command, it merges FULL backup with target PAGE backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=["--data-checksums"]) self.init_pb(backup_dir) @@ -100,19 +96,15 @@ def test_basic_merge_full_page(self): count2 = node.execute("postgres", "select count(*) from test") self.assertEqual(count1, count2) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_compressed_backups(self): """ Test MERGE command with compressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=["--data-checksums"]) 
self.init_pb(backup_dir) @@ -163,18 +155,16 @@ def test_merge_compressed_backups(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) def test_merge_compressed_backups_1(self): """ Test MERGE command with compressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=["--data-checksums"]) self.init_pb(backup_dir) @@ -234,18 +224,16 @@ def test_merge_compressed_backups_1(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) def test_merge_compressed_and_uncompressed_backups(self): """ Test MERGE command with compressed and uncompressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], ) @@ -306,18 +294,16 @@ def test_merge_compressed_and_uncompressed_backups(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) def test_merge_compressed_and_uncompressed_backups_1(self): """ Test MERGE command with compressed and uncompressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], ) @@ -380,18 +366,16 @@ def test_merge_compressed_and_uncompressed_backups_1(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) def test_merge_compressed_and_uncompressed_backups_2(self): """ Test MERGE command with compressed and uncompressed backups """ - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, "backup") + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, "backup") # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=["--data-checksums"], ) @@ -450,11 +434,6 @@ def test_merge_compressed_and_uncompressed_backups_2(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - node.cleanup() - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") def test_merge_tablespaces(self): """ @@ -463,10 +442,9 @@ def test_merge_tablespaces(self): tablespace, take page backup, merge it and restore """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - 
base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) @@ -538,10 +516,9 @@ def test_merge_tablespaces_1(self): drop first tablespace and take delta backup, merge it and restore """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ) @@ -607,9 +584,6 @@ def test_merge_tablespaces_1(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_page_truncate(self): """ make node, create table, take full backup, @@ -617,17 +591,16 @@ def test_merge_page_truncate(self): take page backup, merge full and page, restore last page backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -686,19 +659,11 @@ def test_merge_page_truncate(self): node_restored.slow_start() # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") + result1 = node.table_checksum("t_heap") + result2 = node_restored.table_checksum("t_heap") self.assertEqual(result1, result2) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_delta_truncate(self): """ make node, create table, take full backup, @@ -706,17 +671,16 @@ def test_merge_delta_truncate(self): take page backup, merge full and page, restore last page backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -775,19 +739,11 @@ def test_merge_delta_truncate(self): node_restored.slow_start() # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") + result1 = node.table_checksum("t_heap") + result2 = node_restored.table_checksum("t_heap") 
self.assertEqual(result1, result2) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_ptrack_truncate(self): """ make node, create table, take full backup, @@ -796,12 +752,11 @@ def test_merge_ptrack_truncate(self): restore last page backup and check data correctness """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], ptrack_enable=True) @@ -850,7 +805,7 @@ def test_merge_ptrack_truncate(self): self.validate_pb(backup_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') @@ -871,19 +826,11 @@ def test_merge_ptrack_truncate(self): node_restored.slow_start() # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") + result1 = node.table_checksum("t_heap") + result2 = node_restored.table_checksum("t_heap") self.assertEqual(result1, result2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_merge_delta_delete(self): """ @@ -891,10 +838,9 @@ def test_merge_delta_delete(self): alter tablespace location, take delta backup, merge full and delta, restore database. 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -943,7 +889,7 @@ def test_merge_delta_delete(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -967,19 +913,17 @@ def test_merge_delta_delete(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_continue_failed_merge(self): """ Check that failed MERGE can be continued """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join( - module_name, fname, 'node'), + self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1043,18 +987,16 @@ def test_continue_failed_merge(self): node.cleanup() self.restore_node(backup_dir, 'node', node) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_continue_failed_merge_with_corrupted_delta_backup(self): """ Fail merge via gdb, corrupt DELTA backup, try to continue merge """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -1141,17 +1083,15 @@ def test_continue_failed_merge_with_corrupted_delta_backup(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_continue_failed_merge_2(self): """ Check that failed MERGE on delete can be continued """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -1211,18 +1151,17 @@ def test_continue_failed_merge_2(self): # Try to continue failed MERGE self.merge_backup(backup_dir, "node", backup_id) - # Clean after yourself - self.del_test_dir(module_name, fname) def test_continue_failed_merge_3(self): """ Check that failed MERGE cannot be continued if intermediate backup is missing. 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -1307,17 +1246,13 @@ def test_continue_failed_merge_3(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_merge_different_compression_algo(self): """ Check that backups with different compression algorithms can be merged """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1360,17 +1295,14 @@ def test_merge_different_compression_algo(self): self.merge_backup(backup_dir, "node", backup_id) - self.del_test_dir(module_name, fname) - def test_merge_different_wal_modes(self): """ Check that backups with different wal modes can be merged correctly """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1402,17 +1334,16 @@ def test_merge_different_wal_modes(self): self.assertEqual( 'STREAM', self.show_pb(backup_dir, 'node', backup_id)['wal']) - self.del_test_dir(module_name, fname) - def test_crash_after_opening_backup_control_1(self): """ check that crashing after opening backup.control for writing will not result in losing backup metadata """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1452,8 +1383,6 @@ def test_crash_after_opening_backup_control_1(self): self.assertEqual( 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_crash_after_opening_backup_control_2(self): """ @@ -1461,10 +1390,11 @@ def test_crash_after_opening_backup_control_2(self): for writing will not result in losing metadata about backup files TODO: rewrite """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1543,8 +1473,6 @@ 
def test_crash_after_opening_backup_control_2(self): self.compare_pgdata(pgdata, pgdata_restored) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_losing_file_after_failed_merge(self): """ @@ -1552,10 +1480,11 @@ def test_losing_file_after_failed_merge(self): for writing will not result in losing metadata about backup files TODO: rewrite """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1634,15 +1563,14 @@ def test_losing_file_after_failed_merge(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - self.del_test_dir(module_name, fname) - def test_failed_merge_after_delete(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1715,15 +1643,14 @@ def test_failed_merge_after_delete(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.del_test_dir(module_name, fname) - def test_failed_merge_after_delete_1(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1791,15 +1718,14 @@ def test_failed_merge_after_delete_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.del_test_dir(module_name, fname) - def test_failed_merge_after_delete_2(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1853,15 +1779,14 @@ def test_failed_merge_after_delete_2(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.del_test_dir(module_name, fname) - def test_failed_merge_after_delete_3(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1941,21 +1866,22 
@@ def test_failed_merge_after_delete_3(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") + # Skipped, because backups from the future are invalid. + # This cause a "ERROR: Can't assign backup_id, there is already a backup in future" + # now (PBCKP-259). We can conduct such a test again when we + # untie 'backup_id' from 'start_time' + @unittest.skip("skip") def test_merge_backup_from_future(self): """ take FULL backup, table PAGE backup from future, try to merge page with FULL """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1990,12 +1916,10 @@ def test_merge_backup_from_future(self): backup_id = self.backup_node(backup_dir, 'node', node, backup_type='page') pgdata = self.pgdata_content(node.data_dir) - result = node.safe_psql( - 'postgres', - 'SELECT * from pgbench_accounts') + result = node.table_checksum("pgbench_accounts") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2018,17 +1942,12 @@ def test_merge_backup_from_future(self): {'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.safe_psql( - 'postgres', - 'SELECT * from pgbench_accounts') + result_new = node_restored.table_checksum("pgbench_accounts") - self.assertTrue(result, result_new) + self.assertEqual(result, result_new) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_merge_multiple_descendants(self): """ @@ -2041,12 +1960,11 @@ def test_merge_multiple_descendants(self): FULLb | FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2215,9 +2133,6 @@ def test_merge_multiple_descendants(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_smart_merge(self): """ @@ -2227,13 +2142,12 @@ def test_smart_merge(self): copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') 
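Throughout these hunks the per-test boilerplate fname = self.id().split('.')[3] and the module-level module_name constant are replaced by self.module_name and self.fname attributes supplied by the shared base class. A minimal sketch of how such attributes could be derived, assuming the real ProbackupTest in tests/helpers/ptrack_helpers.py does something equivalent (its implementation is not part of this diff):

    import unittest

    class ProbackupTestStub(unittest.TestCase):
        # Hypothetical stand-in for the shared base class.

        @property
        def module_name(self):
            # 'tests.merge_test.MergeTest.test_smart_merge' -> 'merge_test'
            return self.id().split('.')[-3]

        @property
        def fname(self):
            # The test method name, i.e. the same value the old
            # self.id().split('.')[3] expression produced.
            return self.id().split('.')[-1]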
self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2275,16 +2189,14 @@ def test_smart_merge(self): with open(logfile, 'r') as f: logfile_content = f.read() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_idempotent_merge(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2350,18 +2262,15 @@ def test_idempotent_merge(self): self.assertEqual( page_id_2, self.show_pb(backup_dir, 'node')[0]['id']) - self.del_test_dir(module_name, fname) - def test_merge_correct_inheritance(self): """ Make sure that backup metainformation fields 'note' and 'expire-time' are correctly inherited during merge """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2404,18 +2313,15 @@ def test_merge_correct_inheritance(self): page_meta['expire-time'], self.show_pb(backup_dir, 'node', page_id)['expire-time']) - self.del_test_dir(module_name, fname) - def test_merge_correct_inheritance_1(self): """ Make sure that backup metainformation fields 'note' and 'expire-time' are correctly inherited during merge """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2453,8 +2359,6 @@ def test_merge_correct_inheritance_1(self): 'expire-time', self.show_pb(backup_dir, 'node', page_id)) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_multi_timeline_merge(self): @@ -2469,10 +2373,9 @@ def test_multi_timeline_merge(self): P must have F as parent """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2536,11 +2439,10 @@ def test_multi_timeline_merge(self): self.merge_backup(backup_dir, 'node', page_id) - result = node.safe_psql( - "postgres", "select * from pgbench_accounts") + result = node.table_checksum("pgbench_accounts") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -2549,8 +2451,7 @@ def test_multi_timeline_merge(self): self.set_auto_conf(node_restored, 
{'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.safe_psql( - "postgres", "select * from pgbench_accounts") + result_new = node_restored.table_checksum("pgbench_accounts") self.assertEqual(result, result_new) @@ -2570,9 +2471,6 @@ def test_multi_timeline_merge(self): '--amcheck', '-d', 'postgres', '-p', str(node_restored.port)]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_merge_page_header_map_retry(self): @@ -2580,10 +2478,11 @@ def test_merge_page_header_map_retry(self): page header map cannot be trusted when running retry """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2619,17 +2518,15 @@ def test_merge_page_header_map_retry(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_missing_data_file(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2678,16 +2575,15 @@ def test_missing_data_file(self): 'ERROR: Cannot open backup file "{0}": No such file or directory'.format(file_to_remove), logfile_content) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_missing_non_data_file(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2735,16 +2631,15 @@ def test_missing_non_data_file(self): self.assertEqual( 'MERGING', self.show_pb(backup_dir, 'node')[1]['status']) - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_merge_remote_mode(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2791,16 +2686,13 @@ def test_merge_remote_mode(self): self.assertEqual( 'OK', self.show_pb(backup_dir, 'node')[0]['status']) - self.del_test_dir(module_name, fname) - def test_merge_pg_filenode_map(self): """ https://github.com/postgrespro/pg_probackup/issues/320 """ - fname = self.id().split('.')[3] - backup_dir = 
os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2809,7 +2701,7 @@ def test_merge_pg_filenode_map(self): node.slow_start() node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), initdb_params=['--data-checksums']) node1.cleanup() @@ -2842,8 +2734,48 @@ def test_merge_pg_filenode_map(self): 'postgres', 'select 1') - # Clean after yourself - self.del_test_dir(module_name, fname) + def test_unfinished_merge(self): + """ Test when parent has unfinished merge with a different backup. """ + self._check_gdb_flag_or_skip_test() + cases = [('fail_merged', 'write_backup_filelist', ['MERGED', 'MERGING', 'OK']), + ('fail_merging', 'pgBackupWriteControl', ['MERGING', 'OK', 'OK'])] + + for name, terminate_at, states in cases: + node_name = 'node_' + name + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, name) + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, node_name), + set_replication=True, + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, node_name, node) + self.set_archiving(backup_dir, node_name, node) + node.slow_start() + + full_id=self.backup_node(backup_dir, node_name, node, options=['--stream']) + + backup_id = self.backup_node(backup_dir, node_name, node, backup_type='delta') + second_backup_id = self.backup_node(backup_dir, node_name, node, backup_type='delta') + + gdb = self.merge_backup(backup_dir, node_name, backup_id, gdb=True) + gdb.set_breakpoint(terminate_at) + gdb.run_until_break() + + gdb.remove_all_breakpoints() + gdb._execute('signal SIGINT') + gdb.continue_execution_until_error() + + print(self.show_pb(backup_dir, node_name, as_json=False, as_text=True)) + + backup_infos = self.show_pb(backup_dir, node_name) + self.assertEqual(len(backup_infos), len(states)) + for expected, real in zip(states, backup_infos): + self.assertEqual(expected, real['status']) + + with self.assertRaisesRegex(ProbackupException, + f"Full backup {full_id} has unfinished merge with backup {backup_id}"): + self.merge_backup(backup_dir, node_name, second_backup_id, gdb=False) # 1. Need new test with corrupted FULL backup # 2. 
different compression levels diff --git a/tests/option.py b/tests/option_test.py similarity index 72% rename from tests/option.py rename to tests/option_test.py index 023a0c2c6..d1e8cb3a6 100644 --- a/tests/option.py +++ b/tests/option_test.py @@ -1,10 +1,7 @@ import unittest import os from .helpers.ptrack_helpers import ProbackupTest, ProbackupException - - -module_name = 'option' - +import locale class OptionTest(ProbackupTest, unittest.TestCase): @@ -18,15 +15,6 @@ def test_help_1(self): help_out.read().decode("utf-8") ) - # @unittest.skip("skip") - def test_version_2(self): - """help options""" - with open(os.path.join(self.dir_path, "expected/option_version.out"), "rb") as version_out: - self.assertIn( - version_out.read().decode("utf-8"), - self.run_pb(["--version"]) - ) - # @unittest.skip("skip") def test_without_backup_path_3(self): """backup command failure without backup mode option""" @@ -36,18 +24,18 @@ def test_without_backup_path_3(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: BACKUP_PATH (-B, --backup-path)', + 'ERROR: No backup catalog path specified.\n' + \ + 'Please specify it either using environment variable BACKUP_PATH or\n' + \ + 'command line option --backup-path (-B)', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # @unittest.skip("skip") def test_options_4(self): """check options test""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -59,7 +47,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: --instance', + 'ERROR: Required parameter not specified: --instance', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -70,7 +58,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: BACKUP_MODE (-b, --backup-mode)', + 'ERROR: No backup mode specified.\nPlease specify it either using environment variable BACKUP_MODE or\ncommand line option --backup-mode (-b)', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -81,7 +69,7 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: invalid backup-mode "bad"', + 'ERROR: Invalid backup-mode "bad"', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) @@ -107,29 +95,20 @@ def test_options_4(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - "option requires an argument -- 'i'", + "Option '-i' requires an argument", e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_options_5(self): """check options test""" - fname = self.id().split(".")[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node 
= self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) output = self.init_pb(backup_dir) - self.assertIn( - "INFO: Backup catalog", - output) + self.assertIn(f"INFO: Backup catalog '{backup_dir}' successfully initialized", output) - self.assertIn( - "successfully inited", - output) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -224,5 +203,51 @@ def test_options_5(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) + # @unittest.skip("skip") + def test_help_6(self): + """help options""" + if ProbackupTest.enable_nls: + if check_locale('ru_RU.utf-8'): + self.test_env['LC_ALL'] = 'ru_RU.utf-8' + with open(os.path.join(self.dir_path, "expected/option_help_ru.out"), "rb") as help_out: + self.assertEqual( + self.run_pb(["--help"]), + help_out.read().decode("utf-8") + ) + else: + self.skipTest( + "Locale ru_RU.utf-8 doesn't work. You need install ru_RU.utf-8 locale for this test") + else: + self.skipTest( + 'You need configure PostgreSQL with --enabled-nls option for this test') + + # @unittest.skip("skip") + def test_options_no_scale_units(self): + """check --no-scale-units option""" + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node')) + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + # check that --no-scale-units option works correctly + output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node"]) + self.assertIn(container=output, member="archive-timeout = 5min") + output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--no-scale-units"]) + self.assertIn(container=output, member="archive-timeout = 300") + self.assertNotIn(container=output, member="archive-timeout = 300s") + # check that we have now quotes ("") in json output + output = self.run_pb(["show-config", "--backup-path", backup_dir, "--instance=node", "--no-scale-units", "--format=json"]) + self.assertIn(container=output, member='"archive-timeout": 300,') + self.assertIn(container=output, member='"retention-redundancy": 0,') + self.assertNotIn(container=output, member='"archive-timeout": "300",') + +def check_locale(locale_name): + ret=True + old_locale = locale.setlocale(locale.LC_CTYPE,"") + try: + locale.setlocale(locale.LC_CTYPE, locale_name) + except locale.Error: + ret=False + finally: + locale.setlocale(locale.LC_CTYPE, old_locale) + return ret diff --git a/tests/page.py b/tests/page_test.py similarity index 87% rename from tests/page.py rename to tests/page_test.py index c1cba6b40..a66d6d413 100644 --- a/tests/page.py +++ b/tests/page_test.py @@ -6,9 +6,7 @@ import subprocess import gzip import shutil - -module_name = 'page' - +import time class PageTest(ProbackupTest, unittest.TestCase): @@ -20,17 +18,16 @@ def test_basic_page_vacuum_truncate(self): take page backup, take second page backup, restore last page backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, 
initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '300s'}) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -88,19 +85,11 @@ def test_basic_page_vacuum_truncate(self): node_restored.slow_start() # Logical comparison - result1 = node.safe_psql( - "postgres", - "select * from t_heap") - - result2 = node_restored.safe_psql( - "postgres", - "select * from t_heap") + result1 = node.table_checksum("t_heap") + result2 = node_restored.table_checksum("t_heap") self.assertEqual(result1, result2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_vacuum_truncate_1(self): """ @@ -109,10 +98,9 @@ def test_page_vacuum_truncate_1(self): take page backup, insert some data, take second page backup and check data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -159,7 +147,7 @@ def test_page_vacuum_truncate_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -171,9 +159,6 @@ def test_page_vacuum_truncate_1(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_stream(self): """ @@ -181,10 +166,9 @@ def test_page_stream(self): restore them and check data correctness """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -203,7 +187,7 @@ def test_page_stream(self): "md5(i::text)::tsvector as tsvector " "from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full', options=['--stream']) @@ -214,7 +198,7 @@ def test_page_stream(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=['--stream', '-j', '4']) @@ -235,7 +219,7 @@ def test_page_stream(self): ' CMD: {1}'.format(repr(self.output), self.cmd)) node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) 
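The comparisons above no longer pull full SELECT * result sets through safe_psql(); instead both nodes are reduced to a table_checksum() digest. The helper itself lives in the test framework rather than in this diff; a rough sketch of an equivalent routine, with illustrative (assumed) names and SQL:

    import hashlib

    def table_checksum(node, table, dbname="postgres"):
        # Hash each row's text form in a stable order, then digest the set,
        # so two nodes can be compared without shipping whole tables around.
        rows = node.safe_psql(
            dbname,
            "SELECT md5(t::text) FROM {0} AS t ORDER BY 1".format(table))
        return hashlib.md5(rows).hexdigest()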
node.cleanup() @@ -254,13 +238,10 @@ def test_page_stream(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_archive(self): """ @@ -268,10 +249,9 @@ def test_page_archive(self): restore them and check data correctness """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -288,7 +268,7 @@ def test_page_archive(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - full_result = node.execute("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='full') @@ -298,7 +278,7 @@ def test_page_archive(self): "insert into t_heap select i as id, " "md5(i::text) as text, md5(i::text)::tsvector as tsvector " "from generate_series(100, 200) i") - page_result = node.execute("postgres", "SELECT * FROM t_heap") + page_result = node.table_checksum("t_heap") page_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page', options=["-j", "4"]) @@ -324,7 +304,7 @@ def test_page_archive(self): node.slow_start() - full_result_new = node.execute("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -348,23 +328,19 @@ def test_page_archive(self): node.slow_start() - page_result_new = node.execute("postgres", "SELECT * FROM t_heap") + page_result_new = node.table_checksum("t_heap") self.assertEqual(page_result, page_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_multiple_segments(self): """ Make node, create table with multiple segments, write some data to it, check page and data correctness """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -390,7 +366,7 @@ def test_page_multiple_segments(self): pgbench.wait() # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select count(*) from pgbench_accounts") + result = node.table_checksum("pgbench_accounts") # PAGE BACKUP self.backup_node(backup_dir, 'node', node, backup_type='page') @@ -399,7 +375,7 @@ def test_page_multiple_segments(self): # RESTORE NODE restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'restored_node')) + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = 
self.get_tblspace_path( @@ -418,8 +394,7 @@ def test_page_multiple_segments(self): self.set_auto_conf(restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.safe_psql( - "postgres", "select count(*) from pgbench_accounts") + result_new = restored_node.table_checksum("pgbench_accounts") # COMPARE RESTORED FILES self.assertEqual(result, result_new, 'data is lost') @@ -427,9 +402,6 @@ def test_page_multiple_segments(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_delete(self): """ @@ -437,10 +409,9 @@ def test_page_delete(self): delete everything from table, vacuum table, take page backup, restore page backup, compare . """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -477,7 +448,7 @@ def test_page_delete(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -499,9 +470,6 @@ def test_page_delete(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_delete_1(self): """ @@ -509,10 +477,9 @@ def test_page_delete_1(self): delete everything from table, vacuum table, take page backup, restore page backup, compare . 
""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -554,7 +521,7 @@ def test_page_delete_1(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored') + base_dir=os.path.join(self.module_name, self.fname, 'node_restored') ) node_restored.cleanup() @@ -577,26 +544,22 @@ def test_page_delete_1(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_parallel_pagemap(self): """ Test for parallel WAL segments reading, during which pagemap is built """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={ "hot_standby": "on" } ) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), ) self.init_pb(backup_dir) @@ -652,18 +615,16 @@ def test_parallel_pagemap(self): # Clean after yourself node.cleanup() node_restored.cleanup() - self.del_test_dir(module_name, fname) def test_parallel_pagemap_1(self): """ Test for parallel WAL segments reading, during which pagemap is built """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') # Initialize instance and backup directory node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={} ) @@ -704,7 +665,6 @@ def test_parallel_pagemap_1(self): # Clean after yourself node.cleanup() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_page_backup_with_lost_wal_segment(self): @@ -715,12 +675,11 @@ def test_page_backup_with_lost_wal_segment(self): run page backup, expecting error because of missing wal segment make sure that backup status is 'ERROR' """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -785,9 +744,6 @@ def test_page_backup_with_lost_wal_segment(self): self.show_pb(backup_dir, 'node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_backup_with_corrupted_wal_segment(self): """ @@ -797,12 +753,11 @@ def 
test_page_backup_with_corrupted_wal_segment(self): run page backup, expecting error because of missing wal segment make sure that backup status is 'ERROR' """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -896,9 +851,6 @@ def test_page_backup_with_corrupted_wal_segment(self): self.show_pb(backup_dir, 'node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_backup_with_alien_wal_segment(self): """ @@ -910,18 +862,17 @@ def test_page_backup_with_alien_wal_segment(self): expecting error because of alien wal segment make sure that backup status is 'ERROR' """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) alien_node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'alien_node'), + base_dir=os.path.join(self.module_name, self.fname, 'alien_node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -943,7 +894,7 @@ def test_page_backup_with_alien_wal_segment(self): "create table t_heap as select i as id, " "md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,1000) i;") + "from generate_series(0,10000) i;") alien_node.safe_psql( "postgres", @@ -955,7 +906,7 @@ def test_page_backup_with_alien_wal_segment(self): "create table t_heap_alien as select i as id, " "md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " - "from generate_series(0,100000) i;") + "from generate_series(0,10000) i;") # copy latest wal segment wals_dir = os.path.join(backup_dir, 'wal', 'alien_node') @@ -966,9 +917,9 @@ def test_page_backup_with_alien_wal_segment(self): file = os.path.join(wals_dir, filename) file_destination = os.path.join( os.path.join(backup_dir, 'wal', 'node'), filename) -# file = os.path.join(wals_dir, '000000010000000000000004') - print(file) - print(file_destination) + start = time.time() + while not os.path.exists(file_destination) and time.time() - start < 20: + time.sleep(0.1) os.remove(file_destination) os.rename(file, file_destination) @@ -1017,20 +968,16 @@ def test_page_backup_with_alien_wal_segment(self): self.show_pb(backup_dir, 'node')[2]['status'], 'Backup {0} should have STATUS "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_multithread_page_backup_with_toast(self): """ make node, create toast, do multithread PAGE backup """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - 
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1050,9 +997,6 @@ def test_multithread_page_backup_with_toast(self): backup_dir, 'node', node, backup_type='page', options=["-j", "4"]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_page_create_db(self): """ @@ -1060,10 +1004,9 @@ def test_page_create_db(self): restore database and check it presense """ self.maxDiff = None - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1101,7 +1044,7 @@ def test_page_create_db(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1162,9 +1105,6 @@ def test_page_create_db(self): repr(e.message), self.cmd) ) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_multi_timeline_page(self): @@ -1179,10 +1119,9 @@ def test_multi_timeline_page(self): P must have F as parent """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1247,11 +1186,10 @@ def test_multi_timeline_page(self): pgdata = self.pgdata_content(node.data_dir) - result = node.safe_psql( - "postgres", "select * from pgbench_accounts") + result = node.table_checksum("pgbench_accounts") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -1260,8 +1198,7 @@ def test_multi_timeline_page(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start() - result_new = node_restored.safe_psql( - "postgres", "select * from pgbench_accounts") + result_new = node_restored.table_checksum("pgbench_accounts") self.assertEqual(result, result_new) @@ -1303,9 +1240,6 @@ def test_multi_timeline_page(self): backup_list[4]['id']) self.assertEqual(backup_list[5]['current-tli'], 7) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_multitimeline_page_1(self): @@ -1317,10 +1251,9 @@ def test_multitimeline_page_1(self): P must have F as parent """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'wal_log_hints': 'on'}) @@ -1373,7 +1306,7 @@ def test_multitimeline_page_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -1384,22 +1317,18 @@ def test_multitimeline_page_1(self): self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_page_pg_resetxlog(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1478,7 +1407,7 @@ def test_page_pg_resetxlog(self): # pgdata = self.pgdata_content(node.data_dir) # # node_restored = self.make_simple_node( -# base_dir=os.path.join(module_name, fname, 'node_restored')) +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # node_restored.cleanup() # # self.restore_node( @@ -1487,5 +1416,49 @@ def test_page_pg_resetxlog(self): # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) + def test_page_huge_xlog_record(self): + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + initdb_params=['--data-checksums'], + pg_options={ + 'max_locks_per_transaction': '1000', + 'work_mem': '100MB', + 'temp_buffers': '100MB', + 'wal_buffers': '128MB', + 'wal_level' : 'logical', + }) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + node.pgbench_init(scale=3) + + # Do full backup + self.backup_node(backup_dir, 'node', node, backup_type='full') + show_backup = self.show_pb(backup_dir,'node')[0] + + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "FULL") + + # Originally client had the problem at the transaction that (supposedly) + # deletes a lot of temporary tables (probably it was client disconnect). + # It generated ~40MB COMMIT WAL record. + # + # `pg_logical_emit_message` is much simpler and faster way to generate + # such huge record. 
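For context, one way to confirm that the statement below really emits a record far larger than a default 16 MB WAL segment is to diff LSNs around it. This is only an illustrative sketch and is not part of the test:

    # Hypothetical sanity check around the pg_logical_emit_message() call.
    lsn_before = node.safe_psql(
        "postgres", "select pg_current_wal_lsn()").strip().decode()
    # ... run the pg_logical_emit_message() statement shown below ...
    wal_bytes = int(node.safe_psql(
        "postgres",
        "select pg_wal_lsn_diff(pg_current_wal_lsn(), '{0}')".format(lsn_before)))
    assert wal_bytes > 16 * 1024 * 1024  # well beyond a single WAL segment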
+ node.safe_psql( + "postgres", + "select pg_logical_emit_message(False, 'z', repeat('o', 60*1000*1000))") + + # Do page backup + self.backup_node(backup_dir, 'node', node, backup_type='page') + + show_backup = self.show_pb(backup_dir,'node')[1] + self.assertEqual(show_backup['status'], "OK") + self.assertEqual(show_backup['backup-mode'], "PAGE") diff --git a/tests/pgpro2068.py b/tests/pgpro2068_test.py similarity index 60% rename from tests/pgpro2068.py rename to tests/pgpro2068_test.py index a80d317d4..04f0eb6fa 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068_test.py @@ -9,23 +9,16 @@ from testgres import ProcessType -module_name = '2068' - - class BugTest(ProbackupTest, unittest.TestCase): def test_minrecpoint_on_replica(self): """ https://jira.postgrespro.ru/browse/PGPRO-2068 """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -36,7 +29,7 @@ def test_minrecpoint_on_replica(self): 'bgwriter_lru_multiplier': '4.0', 'max_wal_size': '256MB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -48,7 +41,7 @@ def test_minrecpoint_on_replica(self): # start replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica, options=['-R']) @@ -60,11 +53,6 @@ def test_minrecpoint_on_replica(self): replica, {'port': replica.port, 'restart_after_crash': 'off'}) - # we need those later - node.safe_psql( - "postgres", - "CREATE EXTENSION plpython3u") - node.safe_psql( "postgres", "CREATE EXTENSION pageinspect") @@ -92,7 +80,6 @@ def test_minrecpoint_on_replica(self): # get pids of replica background workers startup_pid = replica.auxiliary_pids[ProcessType.Startup][0] checkpointer_pid = replica.auxiliary_pids[ProcessType.Checkpointer][0] - bgwriter_pid = replica.auxiliary_pids[ProcessType.BackgroundWriter][0] # break checkpointer on UpdateLastRemovedPtr gdb_checkpointer = self.gdb_attach(checkpointer_pid) @@ -115,7 +102,7 @@ def test_minrecpoint_on_replica(self): pgbench.stdout.close() # kill someone, we need a crash - os.kill(int(bgwriter_pid), 9) + replica.kill(someone=ProcessType.BackgroundWriter) gdb_recovery._execute('detach') gdb_checkpointer._execute('detach') @@ -139,48 +126,37 @@ def test_minrecpoint_on_replica(self): recovery_config, "recovery_target_action = 'pause'") replica.slow_start(replica=True) + current_xlog_lsn_query = 'SELECT pg_last_wal_replay_lsn() INTO current_xlog_lsn' if self.get_version(node) < 100000: - script = ''' -DO -$$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = plpy.execute("with 
number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' - else: - script = ''' + current_xlog_lsn_query = 'SELECT min_recovery_end_location INTO current_xlog_lsn FROM pg_control_recovery()' + + script = f''' DO $$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("select pg_last_wal_replay_lsn() as lsn")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' +DECLARE + roid oid; + current_xlog_lsn pg_lsn; + pages_from_future RECORD; + found_corruption bool := false; +BEGIN + {current_xlog_lsn_query}; + RAISE NOTICE 'CURRENT LSN: %', current_xlog_lsn; + FOR roid IN select oid from pg_class class where relkind IN ('r', 'i', 't', 'm') and relpersistence = 'p' LOOP + FOR pages_from_future IN + with number_of_blocks as (select blknum from generate_series(0, pg_relation_size(roid) / 8192 -1) as blknum ) + select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid + from number_of_blocks, page_header(get_raw_page(roid::regclass::text, number_of_blocks.blknum::int)) + where lsn > current_xlog_lsn LOOP + RAISE NOTICE 'Found page from future. 
OID: %, BLKNUM: %, LSN: %', roid, pages_from_future.blknum, pages_from_future.lsn; + found_corruption := true; + END LOOP; + END LOOP; + IF found_corruption THEN + RAISE 'Found Corruption'; + END IF; +END; +$$ LANGUAGE plpgsql; +'''.format(current_xlog_lsn_query=current_xlog_lsn_query) # Find blocks from future replica.safe_psql( @@ -193,6 +169,3 @@ def test_minrecpoint_on_replica(self): # do basebackup # do pg_probackup, expect error - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/pgpro560.py b/tests/pgpro560_test.py similarity index 83% rename from tests/pgpro560.py rename to tests/pgpro560_test.py index 53c7914a2..b665fd200 100644 --- a/tests/pgpro560.py +++ b/tests/pgpro560_test.py @@ -6,9 +6,6 @@ from time import sleep -module_name = 'pgpro560' - - class CheckSystemID(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -20,27 +17,27 @@ def test_pgpro560_control_file_loss(self): """ make backup check that backup failed """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() file = os.path.join(node.base_dir, 'data', 'global', 'pg_control') - os.remove(file) + # Don't delete this file permanently, just rename it + os.rename(file, os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy')) try: self.backup_node(backup_dir, 'node', node, options=['--stream']) # we should die here because exception is what we expect to happen self.assertEqual( - 1, 0, - "Expecting Error because pg_control was deleted.\n " - "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) + 1, 0, + "Expecting Error because pg_control was deleted.\n " + "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: self.assertTrue( 'ERROR: Could not open file' in e.message and @@ -48,8 +45,8 @@ def test_pgpro560_control_file_loss(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) + # Put the file back so Postgres doesn't fail + os.rename(os.path.join(node.base_dir, 'data', 'global', 'pg_control_copy'), file) def test_pgpro560_systemid_mismatch(self): """ @@ -58,21 +55,20 @@ def test_pgpro560_systemid_mismatch(self): make node1 and node2 feed to backup PGDATA from node1 and PGPORT from node2 check that backup failed """ - fname = self.id().split('.')[3] node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums']) node1.slow_start() node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2'), + base_dir=os.path.join(self.module_name, self.fname, 'node2'), set_replication=True, initdb_params=['--data-checksums']) node2.slow_start() - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) @@ -125,6 +121,3 @@ def test_pgpro560_systemid_mismatch(self): e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/pgpro589.py b/tests/pgpro589_test.py similarity index 90% rename from tests/pgpro589.py rename to tests/pgpro589_test.py index d6381a8b5..8ce8e1f56 100644 --- a/tests/pgpro589.py +++ b/tests/pgpro589_test.py @@ -5,9 +5,6 @@ import subprocess -module_name = 'pgpro589' - - class ArchiveCheck(ProbackupTest, unittest.TestCase): def test_pgpro589(self): @@ -17,12 +14,11 @@ def test_pgpro589(self): check that backup status equal to ERROR check that no files where copied to backup catalogue """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -74,6 +70,3 @@ def test_pgpro589(self): "\n Start LSN was not found in archive but datafiles where " "copied to backup catalogue.\n For example: {0}\n " "It is not optimal".format(file)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/ptrack.py b/tests/ptrack_test.py similarity index 89% rename from tests/ptrack.py rename to tests/ptrack_test.py index 5878f0700..7b5bc416b 100644 --- a/tests/ptrack.py +++ b/tests/ptrack_test.py @@ -10,13 +10,10 @@ from threading import Thread -module_name = 'ptrack' - - class PtrackTest(ProbackupTest, unittest.TestCase): def setUp(self): if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') self.fname = self.id().split('.')[3] # @unittest.skip("skip") @@ -24,9 +21,11 @@ def test_drop_rel_during_backup_ptrack(self): """ drop relation during ptrack backup """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -83,18 +82,15 @@ def test_drop_rel_during_backup_ptrack(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_without_full(self): """ptrack backup without validated full backup""" node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -123,18 +119,15 @@ def test_ptrack_without_full(self): self.show_pb(backup_dir, 'node')[0]['status'], "ERROR") - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def 
test_ptrack_threads(self): """ptrack multi thread backup mode""" node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -154,9 +147,6 @@ def test_ptrack_threads(self): backup_type="ptrack", options=["-j", "4"]) self.assertEqual(self.show_pb(backup_dir, 'node')[0]['status'], "OK") - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_stop_pg(self): """ @@ -164,9 +154,9 @@ def test_ptrack_stop_pg(self): restart node, check that ptrack backup can be taken """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -191,18 +181,15 @@ def test_ptrack_stop_pg(self): backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_multi_timeline_backup(self): """ t2 /------P2 t1 ------F---*-----P1 """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -267,9 +254,6 @@ def test_ptrack_multi_timeline_backup(self): self.assertEqual('0', balance) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_multi_timeline_backup_1(self): """ @@ -280,9 +264,9 @@ def test_ptrack_multi_timeline_backup_1(self): t2 /------P2 t1 ---F--------* """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -341,17 +325,14 @@ def test_ptrack_multi_timeline_backup_1(self): self.assertEqual('0', balance) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_eat_my_data(self): """ PGPRO-4051 """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -370,7 +351,7 @@ def test_ptrack_eat_my_data(self): self.backup_node(backup_dir, 'node', node) node_restored = self.make_simple_node( - 
base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) pgbench = node.pgbench(options=['-T', '300', '-c', '1', '--no-vacuum']) @@ -394,7 +375,7 @@ def test_ptrack_eat_my_data(self): self.switch_wal_segment(node) - result = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts") + result = node.table_checksum("pgbench_accounts") node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -415,21 +396,16 @@ def test_ptrack_eat_my_data(self): # Logical comparison self.assertEqual( result, - node_restored.safe_psql( - 'postgres', - 'SELECT * FROM pgbench_accounts'), + node.table_checksum("pgbench_accounts"), 'Data loss') - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_simple(self): """make node, make full and ptrack stream backups," " restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -464,10 +440,10 @@ def test_ptrack_simple(self): if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -487,17 +463,14 @@ def test_ptrack_simple(self): # Logical comparison self.assertEqual( result, - node_restored.safe_psql("postgres", "SELECT * FROM t_heap")) - - # Clean after yourself - self.del_test_dir(module_name, self.fname) + node_restored.table_checksum("t_heap")) # @unittest.skip("skip") def test_ptrack_unprivileged(self): """""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -543,8 +516,7 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -583,8 +555,8 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -619,6 +591,42 @@ def test_ptrack_unprivileged(self): "GRANT EXECUTE ON 
FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) node.safe_psql( "backupdb", @@ -637,11 +645,8 @@ def test_ptrack_unprivileged(self): if ProbackupTest.enterprise: node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") - - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + 'GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;') self.backup_node( backup_dir, 'node', node, @@ -656,9 +661,9 @@ def test_ptrack_unprivileged(self): # @unittest.expectedFailure def test_ptrack_enable(self): """make ptrack without full backup, should result in error""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30s', @@ -694,9 +699,6 @@ def test_ptrack_enable(self): ' CMD: {1}'.format(repr(e.message), self.cmd) ) - # Clean after yourself - self.del_test_dir(module_name, 
self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_disable(self): @@ -705,9 +707,9 @@ def test_ptrack_disable(self): enable ptrack, restart postgresql, take ptrack backup which should fail """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -759,15 +761,12 @@ def test_ptrack_disable(self): ) ) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_uncommitted_xact(self): """make ptrack backup while there is uncommitted open transaction""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -797,7 +796,7 @@ def test_ptrack_uncommitted_xact(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -817,16 +816,15 @@ def test_ptrack_uncommitted_xact(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_vacuum_full(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -879,7 +877,7 @@ def test_ptrack_vacuum_full(self): process.join() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') @@ -902,18 +900,15 @@ def test_ptrack_vacuum_full(self): node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_vacuum_truncate(self): """make node, create table, take full backup, delete last 3 pages, vacuum relation, take ptrack backup, take second ptrack backup, restore last ptrack backup and check data correctness""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -960,7 +955,7 @@ def 
test_ptrack_vacuum_truncate(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() old_tablespace = self.get_tblspace_path(node, 'somedata') @@ -985,18 +980,17 @@ def test_ptrack_vacuum_truncate(self): node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_get_block(self): """ make node, make full and ptrack stream backups, restore them and check data correctness """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -1036,7 +1030,7 @@ def test_ptrack_get_block(self): if self.paranoia: pgdata = self.pgdata_content(node.data_dir) - result = node.safe_psql("postgres", "SELECT * FROM t_heap") + result = node.table_checksum("t_heap") node.cleanup() self.restore_node(backup_dir, 'node', node, options=["-j", "4"]) @@ -1050,18 +1044,15 @@ def test_ptrack_get_block(self): # Logical comparison self.assertEqual( result, - node.safe_psql("postgres", "SELECT * FROM t_heap")) - - # Clean after yourself - self.del_test_dir(module_name, self.fname) + node.table_checksum("t_heap")) # @unittest.skip("skip") def test_ptrack_stream(self): """make node, make full and ptrack stream backups, restore them and check data correctness""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1084,7 +1075,7 @@ def test_ptrack_stream(self): " as t_seq, md5(i::text) as text, md5(i::text)::tsvector" " as tsvector from generate_series(0,100) i") - full_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node( backup_dir, 'node', node, options=['--stream']) @@ -1095,7 +1086,7 @@ def test_ptrack_stream(self): " md5(i::text) as text, md5(i::text)::tsvector as tsvector" " from generate_series(100,200) i") - ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_result = node.table_checksum("t_heap") ptrack_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) @@ -1117,7 +1108,7 @@ def test_ptrack_stream(self): repr(self.output), self.cmd) ) node.slow_start() - full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -1137,19 +1128,16 @@ def test_ptrack_stream(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() - ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_result_new = node.table_checksum("t_heap") self.assertEqual(ptrack_result, ptrack_result_new) - # Clean after yourself - self.del_test_dir(module_name, 
self.fname) - # @unittest.skip("skip") def test_ptrack_archive(self): """make archive node, make full and ptrack backups, check data correctness in restored instance""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1174,7 +1162,7 @@ def test_ptrack_archive(self): " md5(i::text)::tsvector as tsvector" " from generate_series(0,100) i") - full_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result = node.table_checksum("t_heap") full_backup_id = self.backup_node(backup_dir, 'node', node) full_target_time = self.show_pb( backup_dir, 'node', full_backup_id)['recovery-time'] @@ -1187,7 +1175,7 @@ def test_ptrack_archive(self): " md5(i::text)::tsvector as tsvector" " from generate_series(100,200) i") - ptrack_result = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_result = node.table_checksum("t_heap") ptrack_backup_id = self.backup_node( backup_dir, 'node', node, backup_type='ptrack') ptrack_target_time = self.show_pb( @@ -1220,7 +1208,7 @@ def test_ptrack_archive(self): ) node.slow_start() - full_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + full_result_new = node.table_checksum("t_heap") self.assertEqual(full_result, full_result_new) node.cleanup() @@ -1245,14 +1233,11 @@ def test_ptrack_archive(self): self.compare_pgdata(pgdata, pgdata_restored) node.slow_start() - ptrack_result_new = node.safe_psql("postgres", "SELECT * FROM t_heap") + ptrack_result_new = node.table_checksum("t_heap") self.assertEqual(ptrack_result, ptrack_result_new) node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_ptrack_pgpro417(self): """ @@ -1260,9 +1245,9 @@ def test_ptrack_pgpro417(self): delete ptrack backup. Try to take ptrack backup, which should fail. 
Actual only for PTRACK 1.x """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1278,9 +1263,6 @@ def test_ptrack_pgpro417(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql( - "postgres", - "SELECT * FROM t_heap") backup_id = self.backup_node( backup_dir, 'node', node, @@ -1295,7 +1277,7 @@ def test_ptrack_pgpro417(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'node', node, backup_type='ptrack', options=["--stream"]) @@ -1328,9 +1310,6 @@ def test_ptrack_pgpro417(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_page_pgpro417(self): """ @@ -1338,9 +1317,9 @@ def test_page_pgpro417(self): delete page backup. Try to take ptrack backup, which should fail. Actual only for PTRACK 1.x """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1357,7 +1336,7 @@ def test_page_pgpro417(self): "postgres", "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") # PAGE BACKUP node.safe_psql( @@ -1365,7 +1344,7 @@ def test_page_pgpro417(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector " "from generate_series(100,200) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'node', node, backup_type='page') @@ -1394,9 +1373,6 @@ def test_page_pgpro417(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_full_pgpro417(self): """ @@ -1404,9 +1380,9 @@ def test_full_pgpro417(self): Try to take ptrack backup, which should fail. 
Relevant only for PTRACK 1.x """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1424,7 +1400,7 @@ def test_full_pgpro417(self): " md5(i::text)::tsvector as tsvector " " from generate_series(0,100) i" ) - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") self.backup_node(backup_dir, 'node', node, options=["--stream"]) # SECOND FULL BACKUP @@ -1434,7 +1410,7 @@ def test_full_pgpro417(self): " md5(i::text)::tsvector as tsvector" " from generate_series(100,200) i" ) - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'node', node, options=["--stream"]) @@ -1466,18 +1442,15 @@ def test_full_pgpro417(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_create_db(self): """ Make node, take full backup, create database db1, take ptrack backup, restore database and check it presense """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1498,7 +1471,7 @@ def test_create_db(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - node.safe_psql("postgres", "SELECT * FROM t_heap") + node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node, options=["--stream"]) @@ -1520,7 +1493,7 @@ def test_create_db(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1581,9 +1554,6 @@ def test_create_db(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_create_db_on_replica(self): """ @@ -1592,9 +1562,9 @@ def test_create_db_on_replica(self): create database db1, take ptrack backup from replica, restore database and check it presense """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1616,7 +1586,7 @@ def test_create_db_on_replica(self): "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node( @@ -1669,7 +1639,7 @@ def 
test_create_db_on_replica(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1682,16 +1652,13 @@ def test_create_db_on_replica(self): node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_alter_table_set_tablespace_ptrack(self): """Make node, create tablespace with table, take full backup, alter tablespace location, take ptrack backup, restore database.""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1724,8 +1691,7 @@ def test_alter_table_set_tablespace_ptrack(self): # sys.exit(1) # PTRACK BACKUP - #result = node.safe_psql( - # "postgres", "select * from t_heap") + #result = node.table_checksum("t_heap") self.backup_node( backup_dir, 'node', node, backup_type='ptrack', @@ -1738,7 +1704,7 @@ def test_alter_table_set_tablespace_ptrack(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -1767,22 +1733,18 @@ def test_alter_table_set_tablespace_ptrack(self): node_restored, {'port': node_restored.port}) node_restored.slow_start() -# result_new = node_restored.safe_psql( -# "postgres", "select * from t_heap") +# result_new = node_restored.table_checksum("t_heap") # # self.assertEqual(result, result_new, 'lost some data after restore') - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_alter_database_set_tablespace_ptrack(self): """Make node, create tablespace with database," " take full backup, alter tablespace location," " take ptrack backup, restore database.""" - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1819,7 +1781,7 @@ def test_alter_database_set_tablespace_ptrack(self): # RESTORE node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( backup_dir, 'node', @@ -1840,18 +1802,15 @@ def test_alter_database_set_tablespace_ptrack(self): node_restored.port = node.port node_restored.slow_start() - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_drop_tablespace(self): """ Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node 
= self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1874,7 +1833,7 @@ def test_drop_tablespace(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - result = node.safe_psql("postgres", "select * from t_heap") + result = node.table_checksum("t_heap") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=["--stream"]) @@ -1928,24 +1887,21 @@ def test_drop_tablespace(self): "Expecting Error because " "tablespace 'somedata' should not be present") - result_new = node.safe_psql("postgres", "select * from t_heap") + result_new = node.table_checksum("t_heap") self.assertEqual(result, result_new) if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_alter_tablespace(self): """ Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -1969,7 +1925,7 @@ def test_ptrack_alter_tablespace(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(i::text)::tsvector as tsvector from generate_series(0,100) i") - result = node.safe_psql("postgres", "select * from t_heap") + result = node.table_checksum("t_heap") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=["--stream"]) @@ -1978,7 +1934,7 @@ def test_ptrack_alter_tablespace(self): "postgres", "alter table t_heap set tablespace somedata") # GET LOGICAL CONTENT FROM NODE - result = node.safe_psql("postgres", "select * from t_heap") + result = node.table_checksum("t_heap") # FIRTS PTRACK BACKUP self.backup_node( @@ -1991,7 +1947,7 @@ def test_ptrack_alter_tablespace(self): # Restore ptrack backup restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'restored_node')) + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path_new = self.get_tblspace_path( restored_node, 'somedata_restored') @@ -2010,8 +1966,7 @@ def test_ptrack_alter_tablespace(self): restored_node.slow_start() # COMPARE LOGICAL CONTENT - result_new = restored_node.safe_psql( - "postgres", "select * from t_heap") + result_new = restored_node.table_checksum("t_heap") self.assertEqual(result, result_new) restored_node.cleanup() @@ -2045,22 +2000,18 @@ def test_ptrack_alter_tablespace(self): restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.safe_psql( - "postgres", "select * from t_heap") + result_new = restored_node.table_checksum("t_heap") self.assertEqual(result, result_new) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_multiple_segments(self): """ Make node, create table, alter table tablespace, take ptrack backup, move table from tablespace, take ptrack backup """ - backup_dir = os.path.join(self.tmp_path, module_name, 
self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -2079,6 +2030,7 @@ def test_ptrack_multiple_segments(self): # CREATE TABLE node.pgbench_init(scale=100, options=['--tablespace=somedata']) + result = node.table_checksum("pgbench_accounts") # FULL BACKUP self.backup_node(backup_dir, 'node', node, options=['--stream']) @@ -2115,7 +2067,7 @@ def test_ptrack_multiple_segments(self): # GET LOGICAL CONTENT FROM NODE # it`s stupid, because hint`s are ignored by ptrack - result = node.safe_psql("postgres", "select * from pgbench_accounts") + result = node.table_checksum("pgbench_accounts") # FIRTS PTRACK BACKUP self.backup_node( backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) @@ -2125,7 +2077,7 @@ def test_ptrack_multiple_segments(self): # RESTORE NODE restored_node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'restored_node')) + base_dir=os.path.join(self.module_name, self.fname, 'restored_node')) restored_node.cleanup() tblspc_path = self.get_tblspace_path(node, 'somedata') tblspc_path_new = self.get_tblspace_path( @@ -2148,9 +2100,7 @@ def test_ptrack_multiple_segments(self): restored_node, {'port': restored_node.port}) restored_node.slow_start() - result_new = restored_node.safe_psql( - "postgres", - "select * from pgbench_accounts") + result_new = restored_node.table_checksum("pgbench_accounts") # COMPARE RESTORED FILES self.assertEqual(result, result_new, 'data is lost') @@ -2158,9 +2108,6 @@ def test_ptrack_multiple_segments(self): if self.paranoia: self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_atexit_fail(self): """ @@ -2168,14 +2115,14 @@ def test_atexit_fail(self): Relevant only for PTRACK 1.x """ node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'max_connections': '15'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2210,9 +2157,6 @@ def test_atexit_fail(self): "select * from pg_is_in_backup()").rstrip(), "f") - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_clean(self): @@ -2221,12 +2165,12 @@ def test_ptrack_clean(self): Relevant only for PTRACK 1.x """ node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2318,9 +2262,6 @@ def test_ptrack_clean(self): # check that ptrack bits are cleaned self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - # 
Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_ptrack_clean_replica(self): """ @@ -2329,14 +2270,14 @@ def test_ptrack_clean_replica(self): Relevant only for PTRACK 1.x """ master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '30s'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2344,7 +2285,7 @@ def test_ptrack_clean_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2456,18 +2397,16 @@ def test_ptrack_clean_replica(self): # check that ptrack bits are cleaned self.check_ptrack_clean(idx_ptrack[i], idx_ptrack[i]['size']) - # Clean after yourself - self.del_test_dir(module_name, self.fname) # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_cluster_on_btree(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2519,18 +2458,15 @@ def test_ptrack_cluster_on_btree(self): if node.major_version < 11: self.check_ptrack_map_sanity(node, idx_ptrack) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_cluster_on_gist(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2590,18 +2526,15 @@ def test_ptrack_cluster_on_gist(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_cluster_on_btree_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2614,7 +2547,7 @@ def test_ptrack_cluster_on_btree_replica(self): 
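Editorial note: the hunks in this file consistently drop the module-level module_name, the per-test fname = self.id().split('.')[3] assignment, and the trailing del_test_dir() calls in favour of self.module_name / self.fname and cleanup handled by the test base class. The base-class side of that change is not part of this diff; the snippet below is only a minimal sketch, using a hypothetical AutoCleanupTestCase and attribute names assumed for illustration, of how such per-test attributes and automatic directory removal can be centralised in unittest so individual tests no longer need the "Clean after yourself" blocks.

import os
import shutil
import unittest


class AutoCleanupTestCase(unittest.TestCase):
    # Illustrative only: the real ProbackupTest base class is not shown in
    # this diff; the names and paths below are assumptions made for the sketch.
    tmp_path = os.environ.get('PGPROBACKUP_TMP_DIR', '/tmp/pg_probackup_tests')

    def setUp(self):
        # self.id() looks like 'tests.ptrack_test.PtrackTest.test_ptrack_simple'
        parts = self.id().split('.')
        self.module_name = parts[-3].rsplit('_test', 1)[0]  # e.g. 'ptrack'
        self.fname = parts[-1]                              # e.g. 'test_ptrack_simple'
        self.test_path = os.path.join(self.tmp_path, self.module_name, self.fname)
        os.makedirs(self.test_path, exist_ok=True)

    def tearDown(self):
        # Replaces the per-test del_test_dir(module_name, fname) calls.
        shutil.rmtree(self.test_path, ignore_errors=True)

With something like this in place a test only references self.module_name and self.fname when building paths, and the working directory is removed even when the test fails, which is why the explicit cleanup lines can be deleted throughout this file.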
self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2678,7 +2611,7 @@ def test_ptrack_cluster_on_btree_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node) @@ -2686,17 +2619,14 @@ def test_ptrack_cluster_on_btree_replica(self): pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_cluster_on_gist_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2709,7 +2639,7 @@ def test_ptrack_cluster_on_gist_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2779,7 +2709,7 @@ def test_ptrack_cluster_on_gist_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node) @@ -2788,20 +2718,17 @@ def test_ptrack_cluster_on_gist_replica(self): pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_empty(self): """Take backups of every available types and check that PTRACK is clean""" node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2839,7 +2766,7 @@ def test_ptrack_empty(self): node.safe_psql('postgres', 'checkpoint') node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() tblspace1 = self.get_tblspace_path(node, 'somedata') @@ -2864,9 +2791,6 @@ def test_ptrack_empty(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # 
@unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_empty_replica(self): @@ -2875,12 +2799,12 @@ def test_ptrack_empty_replica(self): and check that PTRACK on replica is clean """ master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -2893,7 +2817,7 @@ def test_ptrack_empty_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -2949,7 +2873,7 @@ def test_ptrack_empty_replica(self): pgdata = self.pgdata_content(replica.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2960,19 +2884,16 @@ def test_ptrack_empty_replica(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_truncate(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3041,13 +2962,10 @@ def test_ptrack_truncate(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_basic_ptrack_truncate_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -3056,7 +2974,7 @@ def test_basic_ptrack_truncate_replica(self): 'archive_timeout': '10s', 'checkpoint_timeout': '5min'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3069,7 +2987,7 @@ def test_basic_ptrack_truncate_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3151,7 +3069,7 @@ def test_basic_ptrack_truncate_replica(self): pgdata = 
self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) @@ -3169,19 +3087,16 @@ def test_basic_ptrack_truncate_replica(self): 'postgres', 'select 1') - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3257,20 +3172,17 @@ def test_ptrack_vacuum(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_vacuum_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3283,7 +3195,7 @@ def test_ptrack_vacuum_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3356,7 +3268,7 @@ def test_ptrack_vacuum_replica(self): pgdata = self.pgdata_content(replica.data_dir) node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', node, data_dir=node.data_dir) @@ -3364,19 +3276,16 @@ def test_ptrack_vacuum_replica(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_bits_frozen(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3443,18 +3352,15 @@ def test_ptrack_vacuum_bits_frozen(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - # Clean after 
yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_ptrack_vacuum_bits_frozen_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3467,7 +3373,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3541,19 +3447,16 @@ def test_ptrack_vacuum_bits_frozen_replica(self): pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_bits_visibility(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3620,19 +3523,16 @@ def test_ptrack_vacuum_bits_visibility(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored, comparision_exclusion) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_full_2(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, pg_options={ 'wal_log_hints': 'on' }) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3698,19 +3598,16 @@ def test_ptrack_vacuum_full_2(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_full_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3722,7 +3619,7 @@ def test_ptrack_vacuum_full_replica(self): self.backup_node(backup_dir, 
'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3799,19 +3696,16 @@ def test_ptrack_vacuum_full_replica(self): pgdata_restored = self.pgdata_content(replica.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_truncate_2(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3867,7 +3761,7 @@ def test_ptrack_vacuum_truncate_2(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -3875,19 +3769,16 @@ def test_ptrack_vacuum_truncate_2(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_vacuum_truncate_replica(self): master = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -3900,7 +3791,7 @@ def test_ptrack_vacuum_truncate_replica(self): self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -3972,7 +3863,7 @@ def test_ptrack_vacuum_truncate_replica(self): pgdata = self.pgdata_content(replica.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'replica', node_restored) @@ -3980,9 +3871,6 @@ def test_ptrack_vacuum_truncate_replica(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - @unittest.skip("skip") def test_ptrack_recovery(self): """ @@ -3990,12 +3878,12 @@ def test_ptrack_recovery(self): Actual only for PTRACK 1.x """ node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 
'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4041,14 +3929,11 @@ def test_ptrack_recovery(self): # check that ptrack has correct bits after recovery self.check_ptrack_recovery(idx_ptrack[i]) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_recovery_1(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -4056,7 +3941,7 @@ def test_ptrack_recovery_1(self): 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4117,7 +4002,7 @@ def test_ptrack_recovery_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -4126,19 +4011,16 @@ def test_ptrack_recovery_1(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_zero_changes(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4170,14 +4052,11 @@ def test_ptrack_zero_changes(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_ptrack_pg_resetxlog(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums'], @@ -4185,7 +4064,7 @@ def test_ptrack_pg_resetxlog(self): 'shared_buffers': '512MB', 'max_wal_size': '3GB'}) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4284,7 +4163,7 @@ def test_ptrack_pg_resetxlog(self): # pgdata = self.pgdata_content(node.data_dir) # # node_restored = self.make_simple_node( -# base_dir=os.path.join(module_name, self.fname, 'node_restored')) +# base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) # 
node_restored.cleanup() # # self.restore_node( @@ -4293,19 +4172,16 @@ def test_ptrack_pg_resetxlog(self): # pgdata_restored = self.pgdata_content(node_restored.data_dir) # self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_ptrack_map(self): node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -4457,9 +4333,6 @@ def test_corrupt_ptrack_map(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, self.fname) - # @unittest.skip("skip") def test_horizon_lsn_ptrack(self): """ @@ -4473,9 +4346,9 @@ def test_horizon_lsn_ptrack(self): self.version_to_num('2.4.15'), 'You need pg_probackup old_binary =< 2.4.15 for this test') - backup_dir = os.path.join(self.tmp_path, module_name, self.fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, self.fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -4522,6 +4395,3 @@ def test_horizon_lsn_ptrack(self): # make sure that backup size is exactly the same self.assertEqual(delta_bytes, ptrack_bytes) - - # Clean after yourself - self.del_test_dir(module_name, self.fname) diff --git a/tests/remote.py b/tests/remote_test.py similarity index 83% rename from tests/remote.py rename to tests/remote_test.py index 4d46447f0..2d36d7346 100644 --- a/tests/remote.py +++ b/tests/remote_test.py @@ -5,21 +5,17 @@ from .helpers.cfs_helpers import find_by_name -module_name = 'remote' - - class RemoteTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_remote_sanity(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -45,6 +41,3 @@ def test_remote_sanity(self): # e.message, # "\n Unexpected Error Message: {0}\n CMD: {1}".format( # repr(e.message), self.cmd)) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/replica.py b/tests/replica_test.py similarity index 83% rename from tests/replica.py rename to tests/replica_test.py index 8fb89c222..17fc5a823 100644 --- a/tests/replica.py +++ b/tests/replica_test.py @@ -9,8 +9,6 @@ from time import sleep -module_name = 'replica' - class ReplicaTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -21,16 +19,14 @@ def test_replica_switchover(self): over the course of several switchovers 
https://www.postgresql.org/message-id/54b059d4-2b48-13a4-6f43-95a087c92367%40postgrespro.ru """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -41,7 +37,7 @@ def test_replica_switchover(self): # take full backup and restore it self.backup_node(backup_dir, 'node1', node1, options=['--stream']) node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() # create replica @@ -92,9 +88,6 @@ def test_replica_switchover(self): # https://github.com/postgrespro/pg_probackup/issues/251 self.validate_pb(backup_dir) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_replica_stream_ptrack_backup(self): @@ -103,16 +96,15 @@ def test_replica_stream_ptrack_backup(self): take full stream backup from replica """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') if self.pg_config_version > self.version_to_num('9.6.0'): - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) @@ -133,19 +125,19 @@ def test_replica_stream_ptrack_backup(self): "create table t_heap as select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,256) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") # take full backup and restore it self.backup_node(backup_dir, 'master', master, options=['--stream']) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) self.set_replica(master, replica) # Check data correctness on replica replica.slow_start(replica=True) - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, take FULL backup from replica, @@ -156,7 +148,7 @@ def test_replica_stream_ptrack_backup(self): "insert into t_heap select i as id, md5(i::text) as text, " "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,512) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") self.add_instance(backup_dir, 'replica', replica) backup_id = self.backup_node( @@ 
-172,7 +164,7 @@ def test_replica_stream_ptrack_backup(self): # RESTORE FULL BACKUP TAKEN FROM PREVIOUS STEP node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) @@ -181,7 +173,7 @@ def test_replica_stream_ptrack_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, take PTRACK backup from replica, @@ -193,7 +185,7 @@ def test_replica_stream_ptrack_backup(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(512,768) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', @@ -216,22 +208,18 @@ def test_replica_stream_ptrack_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_archive_page_backup(self): """ make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -240,8 +228,7 @@ def test_replica_archive_page_backup(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -250,7 +237,7 @@ def test_replica_archive_page_backup(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'master', master) @@ -261,7 +248,7 @@ def test_replica_archive_page_backup(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,2560) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'master', master, backup_type='page') @@ -275,7 +262,7 @@ def test_replica_archive_page_backup(self): replica.slow_start(replica=True) # Check data correctness on replica - after = replica.safe_psql("postgres", "SELECT * FROM t_heap") + after = replica.table_checksum("t_heap") self.assertEqual(before, after) # Change data on master, take FULL backup from replica, @@ -287,20 +274,10 @@ def test_replica_archive_page_backup(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(256,25120) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") self.wait_until_replica_catch_with_master(master, replica) - master.pgbench_init(scale=5) - # Continuous making some 
changes on master, - # because WAL archiving on replica in idle DB in PostgreSQL is broken: - # replica will not archive the previous WAL until it receives new records in the next WAL file, - # this "lazy" archiving can be seen in src/backend/replication/walreceiver.c:XLogWalRcvWrite() - # (see !XLByteInSeg checking and XLogArchiveNotify() calling). - pgbench = master.pgbench( - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - options=['-T', '3', '-c', '1', '--no-vacuum']) - backup_id = self.backup_node( backup_dir, 'replica', replica, options=[ @@ -309,16 +286,13 @@ def test_replica_archive_page_backup(self): '--master-db=postgres', '--master-port={0}'.format(master.port)]) - pgbench.wait() - pgbench.stdout.close() - self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) # RESTORE FULL BACKUP TAKEN FROM replica node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node')) + base_dir=os.path.join(self.module_name, self.fname, 'node')) node.cleanup() self.restore_node(backup_dir, 'replica', data_dir=node.data_dir) @@ -327,13 +301,15 @@ def test_replica_archive_page_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM t_heap") + after = node.table_checksum("t_heap") self.assertEqual(before, after) node.cleanup() # Change data on master, make PAGE backup from replica, # restore taken backup and check that restored data equal # to original data + master.pgbench_init(scale=5) + pgbench = master.pgbench( options=['-T', '30', '-c', '2', '--no-vacuum']) @@ -350,7 +326,7 @@ def test_replica_archive_page_backup(self): self.switch_wal_segment(master) - before = master.safe_psql("postgres", "SELECT * FROM pgbench_accounts") + before = master.table_checksum("pgbench_accounts") self.validate_pb(backup_dir, 'replica') self.assertEqual( @@ -366,7 +342,7 @@ def test_replica_archive_page_backup(self): node.slow_start() # CHECK DATA CORRECTNESS - after = node.safe_psql("postgres", "SELECT * FROM pgbench_accounts") + after = master.table_checksum("pgbench_accounts") self.assertEqual( before, after, 'Restored data is not equal to original') @@ -374,27 +350,22 @@ def test_replica_archive_page_backup(self): self.backup_node( backup_dir, 'node', node, options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_make_replica_via_restore(self): """ make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -403,7 +374,7 @@ def test_basic_make_replica_via_restore(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() 
self.backup_node(backup_dir, 'master', master) @@ -414,7 +385,7 @@ def test_basic_make_replica_via_restore(self): "md5(repeat(i::text,10))::tsvector as tsvector " "from generate_series(0,8192) i") - before = master.safe_psql("postgres", "SELECT * FROM t_heap") + before = master.table_checksum("t_heap") backup_id = self.backup_node( backup_dir, 'master', master, backup_type='page') @@ -432,9 +403,6 @@ def test_basic_make_replica_via_restore(self): backup_dir, 'replica', replica, options=['--archive-timeout=30s', '--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_take_backup_from_delayed_replica(self): """ @@ -442,17 +410,15 @@ def test_take_backup_from_delayed_replica(self): restore full backup as delayed replica, launch pgbench, take FULL, PAGE and DELTA backups from replica """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'archive_timeout': '10s'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -461,7 +427,7 @@ def test_take_backup_from_delayed_replica(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'master', master) @@ -541,24 +507,17 @@ def test_take_backup_from_delayed_replica(self): pgbench.wait() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote(self): """ start backup from replica, during backup promote replica check that backup is failed """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -567,8 +526,7 @@ def test_replica_promote(self): 'max_wal_size': '32MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -577,7 +535,7 @@ def test_replica_promote(self): master.slow_start() replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.backup_node(backup_dir, 'master', master) @@ -638,22 +596,15 @@ def test_replica_promote(self): 'setting its status to ERROR'.format(backup_id), log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def 
test_replica_stop_lsn_null_offset(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -661,8 +612,7 @@ def test_replica_stop_lsn_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -678,7 +628,7 @@ def test_replica_stop_lsn_null_offset(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -727,21 +677,16 @@ def test_replica_stop_lsn_null_offset(self): # Clean after yourself gdb_checkpointer.kill() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_replica_stop_lsn_null_offset_next_record(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -749,8 +694,7 @@ def test_replica_stop_lsn_null_offset_next_record(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -760,13 +704,12 @@ def test_replica_stop_lsn_null_offset_next_record(self): # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) self.backup_node(backup_dir, 'master', master) # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -796,6 +739,7 @@ def test_replica_stop_lsn_null_offset_next_record(self): '--stream'], gdb=True) + # Attention! 
this breakpoint is set to a probackup internal function, not a postgres core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb.remove_all_breakpoints() @@ -827,22 +771,20 @@ def test_replica_stop_lsn_null_offset_next_record(self): log_content) self.assertIn( - 'LOG: stop_lsn: 0/4000000', + 'INFO: stop_lsn: 0/4000000', log_content) self.assertTrue(self.show_pb(backup_dir, 'replica')[0]['status'] == 'DONE') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_replica_null_offset(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -850,8 +792,7 @@ def test_archive_replica_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -863,7 +804,7 @@ def test_archive_replica_null_offset(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -916,17 +857,13 @@ def test_archive_replica_null_offset(self): print(output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_replica_not_null_offset(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -934,8 +871,7 @@ def test_archive_replica_not_null_offset(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -947,7 +883,7 @@ def test_archive_replica_not_null_offset(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'node', replica) @@ -1000,24 +936,17 @@ def test_archive_replica_not_null_offset(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_toast(self): """ make archive master, take full and page archive backups from master, set replica, make archive backup from replica """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + 
self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1026,8 +955,7 @@ def test_replica_toast(self): 'shared_buffers': '128MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1043,7 +971,7 @@ def test_replica_toast(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1109,21 +1037,15 @@ def test_replica_toast(self): # Clean after yourself gdb_checkpointer.kill() - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_start_stop_lsn_in_the_same_segno(self): """ """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1132,8 +1054,7 @@ def test_start_stop_lsn_in_the_same_segno(self): 'shared_buffers': '128MB'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1142,13 +1063,12 @@ def test_start_stop_lsn_in_the_same_segno(self): # freeze bgwriter to get rid of RUNNING XACTS records bgwriter_pid = master.auxiliary_pids[ProcessType.BackgroundWriter][0] - gdb_checkpointer = self.gdb_attach(bgwriter_pid) self.backup_node(backup_dir, 'master', master, options=['--stream']) # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1193,17 +1113,13 @@ def test_start_stop_lsn_in_the_same_segno(self): '--stream'], return_id=False) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_replica_promote_1(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1211,8 +1127,7 @@ def test_replica_promote_1(self): 'wal_level': 'replica'}) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) 
@@ -1225,7 +1140,7 @@ def test_replica_promote_1(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1268,17 +1183,13 @@ def test_replica_promote_1(self): os.path.exists(wal_file_partial), "File {0} disappeared".format(wal_file_partial)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote_2(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) @@ -1293,7 +1204,7 @@ def test_replica_promote_2(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1317,9 +1228,6 @@ def test_replica_promote_2(self): backup_dir, 'master', replica, data_dir=replica.data_dir, backup_type='page') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote_archive_delta(self): """ @@ -1327,10 +1235,9 @@ def test_replica_promote_archive_delta(self): t2 /-------> t1 --F---D1--D2-- """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1338,8 +1245,7 @@ def test_replica_promote_archive_delta(self): 'archive_timeout': '30s'}) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1354,7 +1260,7 @@ def test_replica_promote_archive_delta(self): # Create replica node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() self.restore_node(backup_dir, 'node', node2, node2.data_dir) @@ -1442,9 +1348,6 @@ def test_replica_promote_archive_delta(self): pgdata_restored = self.pgdata_content(node1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_promote_archive_page(self): """ @@ -1452,10 +1355,9 @@ def test_replica_promote_archive_page(self): t2 /-------> t1 --F---P1--P2-- """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1463,8 +1365,7 @@ def 
test_replica_promote_archive_page(self): 'archive_timeout': '30s'}) if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1479,7 +1380,7 @@ def test_replica_promote_archive_page(self): # Create replica node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() self.restore_node(backup_dir, 'node', node2, node2.data_dir) @@ -1570,23 +1471,18 @@ def test_replica_promote_archive_page(self): pgdata_restored = self.pgdata_content(node1.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_parent_choosing(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') master = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'master'), + base_dir=os.path.join(self.module_name, self.fname, 'master'), set_replication=True, initdb_params=['--data-checksums']) if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( + self.skipTest( 'Skipped because backup from replica is not supported in PG 9.5') self.init_pb(backup_dir) @@ -1598,7 +1494,7 @@ def test_parent_choosing(self): # Create replica replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() self.restore_node(backup_dir, 'master', replica) @@ -1640,17 +1536,13 @@ def test_parent_choosing(self): backup_dir, 'replica', replica, backup_type='delta', options=['--stream']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_instance_from_the_past(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1687,17 +1579,13 @@ def test_instance_from_the_past(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_replica_via_basebackup(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'hot_standby': 'on'}) @@ -1747,7 +1635,7 @@ def test_replica_via_basebackup(self): node.slow_start() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() pg_basebackup_path = self.get_bin_path('pg_basebackup') @@ -1761,9 
+1649,6 @@ def test_replica_via_basebackup(self): self.set_auto_conf(node_restored, {'port': node_restored.port}) node_restored.slow_start(replica=True) - # Clean after yourself - self.del_test_dir(module_name, fname) - # TODO: # null offset STOP LSN and latest record in previous segment is conrecord (manual only) # archiving from promoted delayed replica diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 000000000..e2ac18bea --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,13 @@ +# Testgres can be installed in the following ways: +# 1. From a pip package (recommended) +# testgres==1.8.5 +# 2. From a specific Git branch, tag or commit +# git+https://github.com/postgrespro/testgres.git@ +# 3. From a local directory +# /path/to/local/directory/testgres +git+https://github.com/postgrespro/testgres.git@archive-command-exec#egg=testgres-pg_probackup2&subdirectory=testgres/plugins/pg_probackup2 +allure-pytest +deprecation +pexpect +pytest==7.4.3 +pytest-xdist diff --git a/tests/restore.py b/tests/restore_test.py similarity index 84% rename from tests/restore.py rename to tests/restore_test.py index bbdadeb23..b6664252e 100644 --- a/tests/restore.py +++ b/tests/restore_test.py @@ -3,31 +3,27 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException import subprocess import sys -from time import sleep from datetime import datetime, timedelta, timezone import hashlib import shutil import json +import stat from shutil import copyfile from testgres import QueryException, StartNodeException from stat import S_ISDIR -module_name = 'restore' - - class RestoreTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_restore_full_to_latest(self): """recovery to latest from full backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -38,7 +34,7 @@ def test_restore_full_to_latest(self): stdout=subprocess.PIPE, stderr=subprocess.STDOUT) pgbench.wait() pgbench.stdout.close() - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") backup_id = self.backup_node(backup_dir, 'node', node) node.stop() @@ -64,21 +60,17 @@ def test_restore_full_to_latest(self): node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_page_to_latest(self): """recovery to latest from full + page backups""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -96,7 +88,7 @@ def 
test_restore_full_page_to_latest(self): backup_id = self.backup_node( backup_dir, 'node', node, backup_type="page") - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() @@ -110,21 +102,17 @@ def test_restore_full_page_to_latest(self): node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_specific_timeline(self): """recovery to target timeline""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -132,7 +120,7 @@ def test_restore_to_specific_timeline(self): node.pgbench_init(scale=2) - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") backup_id = self.backup_node(backup_dir, 'node', node) @@ -176,29 +164,25 @@ def test_restore_to_specific_timeline(self): self.assertEqual(int(recovery_target_timeline), target_tli) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_time(self): """recovery to target time""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'TimeZone': 'GMT'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() node.pgbench_init(scale=2) - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") backup_id = self.backup_node(backup_dir, 'node', node) @@ -226,21 +210,17 @@ def test_restore_to_time(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_xid_inclusive(self): """recovery to target xid""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -258,7 +238,7 @@ def 
test_restore_to_xid_inclusive(self): pgbench.wait() pgbench.stdout.close() - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") with node.connect("postgres") as con: res = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") con.commit() @@ -284,23 +264,19 @@ def test_restore_to_xid_inclusive(self): repr(self.output), self.cmd)) node.slow_start() - after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_xid_not_inclusive(self): """recovery with target inclusive false""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -318,7 +294,7 @@ def test_restore_to_xid_not_inclusive(self): pgbench.wait() pgbench.stdout.close() - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") with node.connect("postgres") as con: result = con.execute("INSERT INTO tbl0005 VALUES ('inserted') RETURNING (xmin)") con.commit() @@ -345,27 +321,22 @@ def test_restore_to_xid_not_inclusive(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 0) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_lsn_inclusive(self): """recovery to target lsn""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -383,7 +354,7 @@ def test_restore_to_lsn_inclusive(self): pgbench.wait() pgbench.stdout.close() - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() @@ -416,27 +387,22 @@ def test_restore_to_lsn_inclusive(self): node.slow_start() - after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_to_lsn_not_inclusive(self): 
"""recovery to target lsn""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -454,7 +420,7 @@ def test_restore_to_lsn_not_inclusive(self): pgbench.wait() pgbench.stdout.close() - before = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() @@ -488,27 +454,23 @@ def test_restore_to_lsn_not_inclusive(self): node.slow_start() - after = node.safe_psql("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) self.assertEqual( len(node.execute("postgres", "SELECT * FROM tbl0005")), 1) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_ptrack_archive(self): """recovery to latest from archive full+ptrack backups""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -530,7 +492,7 @@ def test_restore_full_ptrack_archive(self): backup_id = self.backup_node( backup_dir, 'node', node, backup_type="ptrack") - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() @@ -544,25 +506,21 @@ def test_restore_full_ptrack_archive(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_ptrack(self): """recovery to latest from archive full+ptrack+ptrack backups""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], ptrack_enable=True) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -591,7 +549,7 @@ def 
test_restore_ptrack(self): backup_id = self.backup_node( backup_dir, 'node', node, backup_type="ptrack") - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() @@ -605,26 +563,22 @@ def test_restore_ptrack(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_ptrack_stream(self): """recovery in stream mode to latest from full + ptrack backups""" if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -647,7 +601,7 @@ def test_restore_full_ptrack_stream(self): backup_dir, 'node', node, backup_type="ptrack", options=["--stream"]) - before = node.execute("postgres", "SELECT * FROM pgbench_branches") + before = node.table_checksum("pgbench_branches") node.stop() node.cleanup() @@ -660,12 +614,9 @@ def test_restore_full_ptrack_stream(self): repr(self.output), self.cmd)) node.slow_start() - after = node.execute("postgres", "SELECT * FROM pgbench_branches") + after = node.table_checksum("pgbench_branches") self.assertEqual(before, after) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_ptrack_under_load(self): """ @@ -673,16 +624,15 @@ def test_restore_full_ptrack_under_load(self): with loads when ptrack backup do """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -732,9 +682,6 @@ def test_restore_full_ptrack_under_load(self): "postgres", "SELECT sum(delta) FROM pgbench_history") self.assertEqual(bbalance, delta) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_full_under_load_ptrack(self): """ @@ -742,16 +689,15 @@ def test_restore_full_under_load_ptrack(self): with loads when full backup do """ if not self.ptrack: - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -803,18 +749,14 @@ def test_restore_full_under_load_ptrack(self): "postgres", "SELECT sum(delta) FROM pgbench_history") self.assertEqual(bbalance, delta) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_with_tablespace_mapping_1(self): """recovery using tablespace-mapping option""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -940,18 +882,14 @@ def test_restore_with_tablespace_mapping_1(self): result = node.execute("postgres", "SELECT id FROM test OFFSET 1") self.assertEqual(result[0][0], 2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_with_tablespace_mapping_2(self): """recovery using tablespace-mapping option and page backup""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1017,18 +955,14 @@ def test_restore_with_tablespace_mapping_2(self): count = node.execute("postgres", "SELECT count(*) FROM tbl1") self.assertEqual(count[0][0], 4) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_with_missing_or_corrupted_tablespace_map(self): """restore backup with missing or corrupted tablespace_map""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1051,7 +985,7 @@ def test_restore_with_missing_or_corrupted_tablespace_map(self): pgdata = self.pgdata_content(node.data_dir) node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() olddir = self.get_tblspace_path(node, 'tblspace') @@ -1147,22 +1081,18 @@ def test_restore_with_missing_or_corrupted_tablespace_map(self): pgdata_restored = self.pgdata_content(node2.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - 
self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_archive_node_backup_stream_restore_to_recovery_time(self): """ make node with archiving, make stream backup, make PITR to Recovery Time """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1195,9 +1125,6 @@ def test_archive_node_backup_stream_restore_to_recovery_time(self): result = node.psql("postgres", 'select * from t_heap') self.assertTrue('does not exist' in result[2].decode("utf-8")) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_archive_node_backup_stream_restore_to_recovery_time(self): @@ -1205,13 +1132,12 @@ def test_archive_node_backup_stream_restore_to_recovery_time(self): make node with archiving, make stream backup, make PITR to Recovery Time """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1242,9 +1168,6 @@ def test_archive_node_backup_stream_restore_to_recovery_time(self): result = node.psql("postgres", 'select * from t_heap') self.assertTrue('does not exist' in result[2].decode("utf-8")) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_archive_node_backup_stream_pitr(self): @@ -1253,13 +1176,12 @@ def test_archive_node_backup_stream_pitr(self): create table t_heap, make pitr to Recovery Time, check that t_heap do not exists """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1290,9 +1212,6 @@ def test_archive_node_backup_stream_pitr(self): result = node.psql("postgres", 'select * from t_heap') self.assertEqual(True, 'does not exist' in result[2].decode("utf-8")) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_archive_node_backup_archive_pitr_2(self): @@ -1301,12 +1220,11 @@ def test_archive_node_backup_archive_pitr_2(self): create table t_heap, make pitr to Recovery Time, check that t_heap do not exists """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), 
initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1320,7 +1238,7 @@ def test_archive_node_backup_archive_pitr_2(self): node.stop() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() recovery_time = self.show_pb( @@ -1348,9 +1266,6 @@ def test_archive_node_backup_archive_pitr_2(self): result = node_restored.psql("postgres", 'select * from t_heap') self.assertTrue('does not exist' in result[2].decode("utf-8")) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_archive_restore_to_restore_point(self): @@ -1359,12 +1274,11 @@ def test_archive_restore_to_restore_point(self): create table t_heap, make pitr to Recovery Time, check that t_heap do not exists """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1375,9 +1289,7 @@ def test_archive_restore_to_restore_point(self): node.safe_psql( "postgres", "create table t_heap as select generate_series(0,10000)") - result = node.safe_psql( - "postgres", - "select * from t_heap") + result = node.table_checksum("t_heap") node.safe_psql( "postgres", "select pg_create_restore_point('savepoint')") node.safe_psql( @@ -1393,7 +1305,7 @@ def test_archive_restore_to_restore_point(self): node.slow_start() - result_new = node.safe_psql("postgres", "select * from t_heap") + result_new = node.table_checksum("t_heap") res = node.psql("postgres", "select * from t_heap_1") self.assertEqual( res[0], 1, @@ -1401,18 +1313,14 @@ def test_archive_restore_to_restore_point(self): self.assertEqual(result, result_new) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_zags_block_corrupt(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1463,7 +1371,7 @@ def test_zags_block_corrupt(self): "insert into tbl select i from generate_series(0,100) as i") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), initdb_params=['--data-checksums']) node_restored.cleanup() @@ -1480,14 +1388,13 @@ def test_zags_block_corrupt(self): @unittest.skip("skip") # @unittest.expectedFailure def test_zags_block_corrupt_1(self): - fname = self.id().split('.')[3] 
node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={ 'full_page_writes': 'on'} ) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1538,7 +1445,7 @@ def test_zags_block_corrupt_1(self): self.switch_wal_segment(node) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored'), + base_dir=os.path.join(self.module_name, self.fname, 'node_restored'), initdb_params=['--data-checksums']) pgdata = self.pgdata_content(node.data_dir) @@ -1588,13 +1495,12 @@ def test_restore_chain(self): ERROR delta backups, take valid delta backup, restore must be successfull """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1670,19 +1576,15 @@ def test_restore_chain(self): self.restore_node(backup_dir, 'node', node) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_chain_with_corrupted_backup(self): """more complex test_restore_chain()""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1850,19 +1752,19 @@ def test_restore_chain_with_corrupted_backup(self): node.cleanup() - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") + # Skipped, because backups from the future are invalid. + # This causes an "ERROR: Can't assign backup_id, there is already a backup in future" + # now (PBCKP-259). 
We can conduct such a test again when we + untie 'backup_id' from 'start_time' + @unittest.skip("skip") def test_restore_backup_from_future(self): """more complex test_restore_chain()""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1905,22 +1807,18 @@ def test_restore_backup_from_future(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_target_immediate_stream(self): """ correct handling of immediate recovery target for STREAM backups """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1962,22 +1860,18 @@ def test_restore_target_immediate_stream(self): os.path.isfile(recovery_conf), "File {0} do not exists".format(recovery_conf)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_target_immediate_archive(self): """ correct handling of immediate recovery target for ARCHIVE backups """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2022,22 +1916,20 @@ def test_restore_target_immediate_archive(self): with open(recovery_conf, 'r') as f: self.assertIn("recovery_target = 'immediate'", f.read()) - # Clean after yourself - self.del_test_dir(module_name, fname) - - # @unittest.skip("skip") + # Skipped, because default recovery_target_timeline is 'current' + # Before PBCKP-598 the '--recovery-target=latest' option did not work and this test always passed + @unittest.skip("skip") def test_restore_target_latest_archive(self): """ make sure that recovery_target 'latest' is default recovery target """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2088,22 +1980,18 @@ def
test_restore_target_latest_archive(self): self.assertEqual(content_1, content_2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_target_new_options(self): """ check that new --recovery-target-* options are working correctly """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2268,9 +2156,6 @@ def test_restore_target_new_options(self): node.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_smart_restore(self): """ @@ -2280,13 +2165,12 @@ def test_smart_restore(self): copied during restore https://github.com/postgrespro/pg_probackup/issues/63 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2332,26 +2216,22 @@ def test_smart_restore(self): for file in filelist_diff: self.assertNotIn(file, logfile_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pg_11_group_access(self): """ test group access for PG >= 11 """ if self.pg_config_version < self.version_to_num('11.0'): - return unittest.skip('You need PostgreSQL >= 11 for this test') + self.skipTest('You need PostgreSQL >= 11 for this test') - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=[ '--data-checksums', '--allow-group-access']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -2363,7 +2243,7 @@ def test_pg_11_group_access(self): # restore backup node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node( @@ -2373,16 +2253,14 @@ def test_pg_11_group_access(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_concurrent_drop_table(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + 
base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2428,16 +2306,12 @@ def test_restore_concurrent_drop_table(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_lost_non_data_file(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2475,15 +2349,11 @@ def test_lost_non_data_file(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_partial_restore_exclude(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2514,7 +2384,7 @@ def test_partial_restore_exclude(self): # restore FULL backup node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() try: @@ -2551,7 +2421,7 @@ def test_partial_restore_exclude(self): pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) node_restored_2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) node_restored_2.cleanup() self.restore_node( @@ -2590,15 +2460,11 @@ def test_partial_restore_exclude(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_partial_restore_exclude_tablespace(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2645,7 +2511,7 @@ def test_partial_restore_exclude_tablespace(self): # restore FULL backup node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() node1_tablespace = self.get_tblspace_path(node_restored_1, 'somedata') @@ -2671,7 +2537,7 @@ def test_partial_restore_exclude_tablespace(self): pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) node_restored_2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) node_restored_2.cleanup() node2_tablespace = self.get_tblspace_path(node_restored_2, 'somedata') @@ -2713,16 +2579,12 @@ def 
test_partial_restore_exclude_tablespace(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_partial_restore_include(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -2753,7 +2615,7 @@ def test_partial_restore_include(self): # restore FULL backup node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() try: @@ -2792,7 +2654,7 @@ def test_partial_restore_include(self): pgdata_restored_1 = self.pgdata_content(node_restored_1.data_dir) node_restored_2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_2')) node_restored_2.cleanup() self.restore_node( @@ -2839,9 +2701,6 @@ def test_partial_restore_include(self): self.assertNotIn('PANIC', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_partial_restore_backward_compatibility_1(self): """ old binary should be of version < 2.2.0 @@ -2849,10 +2708,9 @@ def test_partial_restore_backward_compatibility_1(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2875,7 +2733,7 @@ def test_partial_restore_backward_compatibility_1(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -2935,7 +2793,7 @@ def test_partial_restore_backward_compatibility_1(self): # get new node node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() self.restore_node( @@ -2955,10 +2813,9 @@ def test_partial_restore_backward_compatibility_merge(self): if not self.probackup_old_path: self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2981,7 +2838,7 @@ def test_partial_restore_backward_compatibility_merge(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + 
base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -3041,7 +2898,7 @@ def test_partial_restore_backward_compatibility_merge(self): # get new node node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() # merge @@ -3059,10 +2916,9 @@ def test_partial_restore_backward_compatibility_merge(self): def test_empty_and_mangled_database_map(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3090,7 +2946,7 @@ def test_empty_and_mangled_database_map(self): f.close() node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() try: @@ -3141,7 +2997,7 @@ def test_empty_and_mangled_database_map(self): self.output, self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: field "dbOid" is not found in the line 42 of ' + 'ERROR: Field "dbOid" is not found in the line 42 of ' 'the file backup_content.control', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) @@ -3157,7 +3013,7 @@ def test_empty_and_mangled_database_map(self): self.output, self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: field "dbOid" is not found in the line 42 of ' + 'ERROR: Field "dbOid" is not found in the line 42 of ' 'the file backup_content.control', e.message, '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) @@ -3172,10 +3028,9 @@ def test_empty_and_mangled_database_map(self): def test_missing_database_map(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, ptrack_enable=self.ptrack, initdb_params=['--data-checksums']) @@ -3228,8 +3083,7 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") # PG 9.6 elif self.get_version(node) > 90600 and self.get_version(node) < 100000: node.safe_psql( @@ -3269,8 +3123,8 @@ def test_missing_database_map(self): "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) - # >= 10 - else: + # >= 10 && < 15 + elif self.get_version(node) >= 100000 and self.get_version(node) < 150000: node.safe_psql( 'backupdb', "REVOKE ALL ON DATABASE backupdb from PUBLIC; " @@ -3306,6 +3160,43 @@ def test_missing_database_map(self): 
"GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" ) + # >= 15 + else: + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_start(text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_backup_stop(boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: # TODO why backup works without these grants ? 
@@ -3319,13 +3210,11 @@ def test_missing_database_map(self): "CREATE EXTENSION ptrack WITH SCHEMA ptrack") if ProbackupTest.enterprise: - node.safe_psql( - "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup") node.safe_psql( "backupdb", - "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup") + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_version() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pgpro_edition() TO backup;") # FULL backup without database_map backup_id = self.backup_node( @@ -3335,7 +3224,7 @@ def test_missing_database_map(self): pgdata = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() # backup has missing database_map and that is legal @@ -3379,9 +3268,6 @@ def test_missing_database_map(self): pgdata_restored = self.pgdata_content(node_restored.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_stream_restore_command_option(self): """ @@ -3396,14 +3282,13 @@ def test_stream_restore_command_option(self): as replica, check that PostgreSQL recovery uses restore_command to obtain WAL from archive. """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={'max_wal_size': '32MB'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3460,20 +3345,16 @@ def test_stream_restore_command_option(self): self.assertEqual('2', timeline_id) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_primary_conninfo(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3486,7 +3367,7 @@ def test_restore_primary_conninfo(self): #primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() str_conninfo='host=192.168.1.50 port=5432 user=foo password=foopass' @@ -3513,20 +3394,16 @@ def test_restore_primary_conninfo(self): self.assertIn(str_conninfo, recovery_conf_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_restore_primary_slot_info(self): """ """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, 
module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -3537,7 +3414,7 @@ def test_restore_primary_slot_info(self): node.pgbench_init(scale=1) replica = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'replica')) + base_dir=os.path.join(self.module_name, self.fname, 'replica')) replica.cleanup() node.safe_psql( @@ -3558,17 +3435,13 @@ def test_restore_primary_slot_info(self): replica.slow_start(replica=True) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_issue_249(self): """ https://github.com/postgrespro/pg_probackup/issues/249 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3603,7 +3476,7 @@ def test_issue_249(self): # restore FULL backup node_restored_1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored_1')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored_1')) node_restored_1.cleanup() self.restore_node( @@ -3627,9 +3500,6 @@ def test_issue_249(self): except QueryException as e: self.assertIn('FATAL', e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_pg_12_probackup_recovery_conf_compatibility(self): """ https://github.com/postgrespro/pg_probackup/issues/249 @@ -3640,15 +3510,14 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): self.skipTest("You must specify PGPROBACKUPBIN_OLD" " for run this test") if self.pg_config_version < self.version_to_num('12.0'): - return unittest.skip('You need PostgreSQL >= 12 for this test') + self.skipTest('You need PostgreSQL >= 12 for this test') if self.version_to_num(self.old_probackup_version) >= self.version_to_num('2.4.5'): self.assertTrue(False, 'You need pg_probackup < 2.4.5 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3701,9 +3570,6 @@ def test_pg_12_probackup_recovery_conf_compatibility(self): node.slow_start() - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_drop_postgresql_auto_conf(self): """ https://github.com/postgrespro/pg_probackup/issues/249 @@ -3712,12 +3578,11 @@ def test_drop_postgresql_auto_conf(self): """ if self.pg_config_version < self.version_to_num('12.0'): - return unittest.skip('You need PostgreSQL >= 12 for this test') + self.skipTest('You need PostgreSQL >= 12 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3746,9 +3611,6 @@ 
def test_drop_postgresql_auto_conf(self): self.assertTrue(os.path.exists(auto_path)) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_truncate_postgresql_auto_conf(self): """ https://github.com/postgrespro/pg_probackup/issues/249 @@ -3757,12 +3619,11 @@ def test_truncate_postgresql_auto_conf(self): """ if self.pg_config_version < self.version_to_num('12.0'): - return unittest.skip('You need PostgreSQL >= 12 for this test') + self.skipTest('You need PostgreSQL >= 12 for this test') - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3791,16 +3652,14 @@ def test_truncate_postgresql_auto_conf(self): self.assertTrue(os.path.exists(auto_path)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_concurrent_restore(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3827,7 +3686,7 @@ def test_concurrent_restore(self): pgdata1 = self.pgdata_content(node.data_dir) node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node.cleanup() node_restored.cleanup() @@ -3850,40 +3709,188 @@ def test_concurrent_restore(self): self.compare_pgdata(pgdata1, pgdata2) self.compare_pgdata(pgdata2, pgdata3) - # Clean after yourself - self.del_test_dir(module_name, fname) + # @unittest.skip("skip") + def test_restore_with_waldir(self): + """recovery using tablespace-mapping option and page backup""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + + with node.connect("postgres") as con: + con.execute( + "CREATE TABLE tbl AS SELECT * " + "FROM generate_series(0,3) AS integer") + con.commit() + + # Full backup + backup_id = self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + + # Create waldir + waldir_path = os.path.join(node.base_dir, "waldir") + os.makedirs(waldir_path) + + # Test recovery from latest + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, + options=[ + "-X", "%s" % (waldir_path)]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + node.slow_start() + + count = node.execute("postgres", "SELECT count(*) FROM tbl") + self.assertEqual(count[0][0], 4) + + # check pg_wal is symlink + if node.major_version >= 10: + wal_path=os.path.join(node.data_dir, "pg_wal") + else: + wal_path=os.path.join(node.data_dir, "pg_xlog") + + 
self.assertEqual(os.path.islink(wal_path), True) + + # @unittest.skip("skip") + def test_restore_to_latest_timeline(self): + """recovery to latest timeline""" + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + node.pgbench_init(scale=2) + + before1 = node.table_checksum("pgbench_branches") + backup_id = self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), + self.restore_node( + backup_dir, 'node', node, options=["-j", "4"]), + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + node.slow_start() + pgbench = node.pgbench( + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + options=['-T', '10', '-c', '2', '--no-vacuum']) + pgbench.wait() + pgbench.stdout.close() + + before2 = node.table_checksum("pgbench_branches") + self.backup_node(backup_dir, 'node', node) + + node.stop() + node.cleanup() + # restore from first backup + restore_result = self.restore_node(backup_dir, 'node', node, + options=[ + "-j", "4", "--recovery-target-timeline=latest", "-i", backup_id] + ) + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), restore_result, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + self.assertEqual(recovery_target_timeline, "latest") + # check recovery-target=latest option for compatibility with previous versions + node.cleanup() + restore_result = self.restore_node(backup_dir, 'node', node, + options=[ + "-j", "4", "--recovery-target=latest", "-i", backup_id] + ) + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), restore_result, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + self.assertEqual(recovery_target_timeline, "latest") + + # start postgres and promote wal files to latest timeline + node.slow_start() + + # check for the latest updates + after = node.table_checksum("pgbench_branches") + self.assertEqual(before2, after) + + # checking recovery_target_timeline=current is the default option + if self.pg_config_version >= self.version_to_num('12.0'): + node.stop() + node.cleanup() + + # restore from first backup + restore_result = self.restore_node(backup_dir, 'node', node, + options=[ + "-j", "4", "-i", backup_id] + ) + + self.assertIn( + "INFO: Restore of backup {0} completed.".format(backup_id), restore_result, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(self.output), self.cmd)) + + # check recovery_target_timeline option in the recovery_conf + recovery_target_timeline = self.get_recovery_conf(node)["recovery_target_timeline"] + self.assertEqual(recovery_target_timeline, "current") + + # start postgres with current timeline + node.slow_start() + + # check for the current updates + after = node.table_checksum("pgbench_branches") + self.assertEqual(before1, after) - # skip this test until 
https://github.com/postgrespro/pg_probackup/pull/399 - @unittest.skip("skip") def test_restore_issue_313(self): """ Check that partially restored PostgreSQL instance cannot be started """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') - node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node('node', set_replication=True, initdb_params=['--data-checksums']) + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) node.slow_start() - # FULL backup backup_id = self.backup_node(backup_dir, 'node', node) node.cleanup() count = 0 - filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) + filelist = self.get_backup_filelist(backup_dir, 'node', backup_id) for file in filelist: # count only nondata files - if int(filelist[file]['is_datafile']) == 0 and int(filelist[file]['size']) > 0: + if int(filelist[file]['is_datafile']) == 0 and \ + not stat.S_ISDIR(int(filelist[file]['mode'])) and \ + not filelist[file]['size'] == '0' and \ + file != 'database_map': count += 1 - node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + node_restored = self.make_simple_node('node_restored') node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -3891,7 +3898,7 @@ def test_restore_issue_313(self): gdb.verbose = False gdb.set_breakpoint('restore_non_data_file') gdb.run_until_break() - gdb.continue_execution_until_break(count - 2) + gdb.continue_execution_until_break(count - 1) gdb.quit() # emulate the user or HA taking care of PG configuration @@ -3914,5 +3921,12 @@ def test_restore_issue_313(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) + with open(os.path.join(node.logs_dir, 'postgresql.log'), 'r') as f: + if self.pg_config_version >= 120000: + self.assertIn( + "PANIC: could not read file \"global/pg_control\"", + f.read()) + else: + self.assertIn( + "PANIC: could not read from control file", + f.read()) diff --git a/tests/retention.py b/tests/retention_test.py similarity index 91% rename from tests/retention.py rename to tests/retention_test.py index 19204807b..88432a00f 100644 --- a/tests/retention.py +++ b/tests/retention_test.py @@ -6,21 +6,17 @@ from distutils.dir_util import copy_tree -module_name = 'retention' - - class RetentionTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_retention_redundancy_1(self): """purge backups using redundancy-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -71,18 +67,14 @@ def test_retention_redundancy_1(self): self.assertTrue(wal_name >= min_wal) self.assertTrue(wal_name <= max_wal) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def
test_retention_window_2(self): """purge backups using window-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -124,18 +116,14 @@ def test_retention_window_2(self): self.delete_expired(backup_dir, 'node', options=['--expired']) self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_retention_window_3(self): """purge all backups using window-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -171,18 +159,14 @@ def test_retention_window_3(self): # count wal files in ARCHIVE - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_retention_window_4(self): """purge all backups using window-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -232,18 +216,14 @@ def test_retention_window_4(self): n_wals = len(os.listdir(wals_dir)) self.assertTrue(n_wals == 0) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_expire_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -356,18 +336,14 @@ def test_window_expire_interleaved_incremental_chains(self): print(self.show_pb( backup_dir, 'node', as_json=False, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_redundancy_expire_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), 
initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -466,18 +442,14 @@ def test_redundancy_expire_interleaved_incremental_chains(self): print(self.show_pb( backup_dir, 'node', as_json=False, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_merge_interleaved_incremental_chains(self): """complicated case of interleaved backup chains""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -603,9 +575,6 @@ def test_window_merge_interleaved_incremental_chains(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 2) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_merge_interleaved_incremental_chains_1(self): """ @@ -616,12 +585,11 @@ def test_window_merge_interleaved_incremental_chains_1(self): FULLb FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -745,9 +713,6 @@ def test_window_merge_interleaved_incremental_chains_1(self): pgdata_restored_b3 = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata_b3, pgdata_restored_b3) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_window_merge_multiple_descendants(self): """ @@ -761,12 +726,11 @@ def test_basic_window_merge_multiple_descendants(self): FULLb | FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1005,9 +969,6 @@ def test_basic_window_merge_multiple_descendants(self): self.show_pb(backup_dir, 'node')[0]['backup-mode'], 'FULL') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_window_merge_multiple_descendants_1(self): """ @@ -1021,12 +982,11 @@ def test_basic_window_merge_multiple_descendants_1(self): FULLb | FULLa """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), 
initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1271,9 +1231,6 @@ def test_basic_window_merge_multiple_descendants_1(self): '--retention-window=1', '--delete-expired', '--log-level-console=log']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_chains(self): """ @@ -1286,12 +1243,11 @@ def test_window_chains(self): PAGE FULL """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1357,9 +1313,6 @@ def test_window_chains(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_chains_1(self): """ @@ -1372,12 +1325,11 @@ def test_window_chains_1(self): PAGE FULL """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1451,9 +1403,6 @@ def test_window_chains_1(self): "Purging finished", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_window_error_backups(self): """ @@ -1466,12 +1415,11 @@ def test_window_error_backups(self): FULL -------redundancy """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1488,9 +1436,6 @@ def test_window_error_backups(self): # Change FULLb backup status to ERROR # self.change_backup_status(backup_dir, 'node', backup_id_b, 'ERROR') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_error_backups_1(self): """ @@ -1499,12 +1444,13 @@ def test_window_error_backups_1(self): FULL -------window """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 
'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1517,6 +1463,7 @@ def test_window_error_backups_1(self): gdb = self.backup_node( backup_dir, 'node', node, backup_type='page', gdb=True) + # Attention! This breakpoint is set on an internal pg_probackup function, not on a PostgreSQL core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb.remove_all_breakpoints() @@ -1535,9 +1482,6 @@ def test_window_error_backups_1(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 4) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_window_error_backups_2(self): """ @@ -1546,12 +1490,13 @@ def test_window_error_backups_2(self): FULL -------window """ - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1564,6 +1509,7 @@ def test_window_error_backups_2(self): gdb = self.backup_node( backup_dir, 'node', node, backup_type='page', gdb=True) + # Attention! This breakpoint is set on an internal pg_probackup function, not on a PostgreSQL core one gdb.set_breakpoint('pg_stop_backup') gdb.run_until_break() gdb._execute('signal SIGKILL') @@ -1583,21 +1529,18 @@ def test_window_error_backups_2(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 3) - # Clean after yourself - # self.del_test_dir(module_name, fname) - def test_retention_redundancy_overlapping_chains(self): """""" - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1631,21 +1574,18 @@ def test_retention_redundancy_overlapping_chains(self): self.validate_pb(backup_dir, 'node') - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_retention_redundancy_overlapping_chains_1(self): """""" - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') + self.skipTest('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir)
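
The same refactoring repeats through every hunk above and below: the module-level module_name constant and the per-test fname = self.id().split('.')[3] are replaced by self.module_name and self.fname attributes, the inline gdb checks become self._check_gdb_flag_or_skip_test(), and return unittest.skip(...) becomes self.skipTest(...). The last change is a real fix, not just style: unittest.skip() called inside a running test only returns a decorator object, so the test keeps running and is reported as passed, while self.skipTest() raises unittest.SkipTest and actually skips it. A minimal sketch of what the base class in tests/helpers/ptrack_helpers.py presumably provides follows; the attribute and method names come from this diff, but the implementations are assumptions:

    import os
    import unittest

    class ProbackupTestSketch(unittest.TestCase):
        # Assumed derivation from the test id, e.g.
        # 'tests.retention_test.RetentionTest.test_retention_window_2'.
        @property
        def module_name(self):
            return self.id().split('.')[-3].rsplit('_test', 1)[0]

        @property
        def fname(self):
            return self.id().split('.')[-1]

        def _check_gdb_flag_or_skip_test(self):
            # Assumed behaviour: the removed inline checks tested self.gdb,
            # which the old skip message ties to the PGPROBACKUP_GDB
            # environment variable and a build without optimizations.
            if os.environ.get('PGPROBACKUP_GDB', 'OFF').upper() != 'ON':
                self.skipTest(
                    'Specify PGPROBACKUP_GDB and build without '
                    'optimizations to run this test')
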
self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1679,19 +1619,15 @@ def test_retention_redundancy_overlapping_chains_1(self): self.validate_pb(backup_dir, 'node') - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_wal_purge_victim(self): """ https://github.com/postgrespro/pg_probackup/issues/103 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1736,19 +1672,17 @@ def test_wal_purge_victim(self): "WARNING: Backup {0} has missing parent 0".format(page_id), e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_failed_merge_redundancy_retention(self): """ Check that retention purge works correctly with MERGING backups """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( base_dir=os.path.join( - module_name, fname, 'node'), + self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -1832,9 +1766,6 @@ def test_failed_merge_redundancy_retention(self): self.assertEqual(len(self.show_pb(backup_dir, 'node')), 10) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_wal_depth_1(self): """ |-------------B5----------> WAL timeline3 @@ -1843,10 +1774,9 @@ def test_wal_depth_1(self): wal-depth=2 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ @@ -1889,7 +1819,7 @@ def test_wal_depth_1(self): # Timeline 2 node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() @@ -1948,8 +1878,6 @@ def test_wal_depth_1(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname) - def test_wal_purge(self): """ -------------------------------------> tli5 @@ -1970,10 +1898,9 @@ def test_wal_purge(self): wal-depth=2 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2010,7 +1937,7 @@ def test_wal_purge(self): # TLI 2 node_tli2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) node_tli2.cleanup() output = self.restore_node( @@ 
-2044,7 +1971,7 @@ def test_wal_purge(self): # TLI3 node_tli3 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli3')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) node_tli3.cleanup() # Note, that successful validation here is a happy coincidence @@ -2065,7 +1992,7 @@ def test_wal_purge(self): # TLI4 node_tli4 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli4')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) node_tli4.cleanup() self.restore_node( @@ -2087,7 +2014,7 @@ def test_wal_purge(self): # TLI5 node_tli5 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli5')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) node_tli5.cleanup() self.restore_node( @@ -2170,8 +2097,6 @@ def test_wal_purge(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname) - def test_wal_depth_2(self): """ -------------------------------------> tli5 @@ -2193,10 +2118,9 @@ def test_wal_depth_2(self): wal-depth=2 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2231,7 +2155,7 @@ def test_wal_depth_2(self): # TLI 2 node_tli2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli2')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli2')) node_tli2.cleanup() output = self.restore_node( @@ -2265,7 +2189,7 @@ def test_wal_depth_2(self): # TLI3 node_tli3 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli3')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli3')) node_tli3.cleanup() # Note, that successful validation here is a happy coincidence @@ -2286,7 +2210,7 @@ def test_wal_depth_2(self): # TLI4 node_tli4 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli4')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli4')) node_tli4.cleanup() self.restore_node( @@ -2308,7 +2232,7 @@ def test_wal_depth_2(self): # TLI5 node_tli5 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_tli5')) + base_dir=os.path.join(self.module_name, self.fname, 'node_tli5')) node_tli5.cleanup() self.restore_node( @@ -2427,8 +2351,6 @@ def test_wal_depth_2(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname) - def test_basic_wal_depth(self): """ B1---B1----B3-----B4----B5------> tli1 @@ -2438,10 +2360,9 @@ def test_basic_wal_depth(self): wal-depth=1 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2530,16 +2451,15 @@ def test_basic_wal_depth(self): self.validate_pb(backup_dir, 'node') - self.del_test_dir(module_name, fname) - def test_concurrent_running_full_backup(self): """ https://github.com/postgrespro/pg_probackup/issues/328 """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, 
module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -2607,5 +2527,3 @@ def test_concurrent_running_full_backup(self): self.assertEqual( len(self.show_pb(backup_dir, 'node')), 6) - - self.del_test_dir(module_name, fname) diff --git a/tests/set_backup.py b/tests/set_backup_test.py similarity index 87% rename from tests/set_backup.py rename to tests/set_backup_test.py index 02ce007bf..31334cfba 100644 --- a/tests/set_backup.py +++ b/tests/set_backup_test.py @@ -5,8 +5,6 @@ from sys import exit from datetime import datetime, timedelta -module_name = 'set_backup' - class SetBackupTest(ProbackupTest, unittest.TestCase): @@ -14,10 +12,9 @@ class SetBackupTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") def test_set_backup_sanity(self): """general sanity for set-backup command""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -44,7 +41,7 @@ def test_set_backup_sanity(self): repr(self.output), self.cmd)) except ProbackupException as e: self.assertIn( - 'ERROR: required parameter not specified: --instance', + 'ERROR: Required parameter not specified: --instance', e.message, "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) @@ -120,19 +117,15 @@ def test_set_backup_sanity(self): # parse string to datetime object #new_expire_time = datetime.strptime(new_expire_time, '%Y-%m-%d %H:%M:%S%z') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_retention_redundancy_pinning(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -174,18 +167,14 @@ def test_retention_redundancy_pinning(self): '{1} is guarded by retention'.format(full_id, page_id), log) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_retention_window_pinning(self): """purge all backups using window-based retention policy""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -237,9 +226,6 @@ def test_retention_window_pinning(self): '{1} is guarded by 
retention'.format(backup_id_1, page1), out) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_wal_retention_and_pinning(self): """ @@ -251,13 +237,12 @@ def test_wal_retention_and_pinning(self): B1 B2---P---B3---> """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -317,9 +302,6 @@ def test_wal_retention_and_pinning(self): '000000010000000000000004') self.assertEqual(timeline['status'], 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_wal_retention_and_pinning_1(self): """ @@ -331,12 +313,11 @@ def test_wal_retention_and_pinning_1(self): P---B1---> """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -383,19 +364,15 @@ def test_wal_retention_and_pinning_1(self): self.validate_pb(backup_dir) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_add_note_newlines(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -418,19 +395,15 @@ def test_add_note_newlines(self): backup_meta = self.show_pb(backup_dir, 'node', backup_id) self.assertNotIn('note', backup_meta) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_add_big_note(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -472,19 +445,16 @@ def test_add_big_note(self): backup_meta = self.show_pb(backup_dir, 'node', backup_id) self.assertEqual(backup_meta['note'], note) - # Clean after yourself - self.del_test_dir(module_name, fname) # @unittest.skip("skip") def test_add_big_note_1(self): """""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), 
set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -504,6 +474,3 @@ def test_add_big_note_1(self): print(backup_meta) self.assertEqual(backup_meta['note'], note) - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/show.py b/tests/show_test.py similarity index 82% rename from tests/show.py rename to tests/show_test.py index 5a46e5ef7..27b6fab96 100644 --- a/tests/show.py +++ b/tests/show_test.py @@ -3,19 +3,15 @@ from .helpers.ptrack_helpers import ProbackupTest, ProbackupException -module_name = 'show' - - class ShowTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") # @unittest.expectedFailure def test_show_1(self): """Status DONE and OK""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -31,17 +27,13 @@ def test_show_1(self): ) self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_show_json(self): """Status DONE and OK""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -58,16 +50,12 @@ def test_show_json(self): self.backup_node(backup_dir, 'node', node) self.assertIn("OK", self.show_pb(backup_dir, 'node', as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_corrupt_2(self): """Status CORRUPT""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -102,16 +90,12 @@ def test_corrupt_2(self): ) self.assertIn("CORRUPT", self.show_pb(backup_dir, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_no_control_file(self): """backup.control doesn't exist""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -137,16 +121,12 @@ def test_no_control_file(self): 'doesn\'t exist', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # 
@unittest.skip("skip") def test_empty_control_file(self): """backup.control is empty""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -173,17 +153,13 @@ def test_empty_control_file(self): 'is empty', output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_control_file(self): """backup.control contains invalid option""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -205,9 +181,6 @@ def test_corrupt_control_file(self): 'WARNING: Invalid option "statuss" in file', self.show_pb(backup_dir, 'node', as_json=False, as_text=True)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_correctness(self): @@ -215,10 +188,9 @@ def test_corrupt_correctness(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -299,9 +271,6 @@ def test_corrupt_correctness(self): output_local['uncompressed-bytes'], output_remote['uncompressed-bytes']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_correctness_1(self): @@ -309,10 +278,9 @@ def test_corrupt_correctness_1(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -397,9 +365,6 @@ def test_corrupt_correctness_1(self): output_local['uncompressed-bytes'], output_remote['uncompressed-bytes']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_corrupt_correctness_2(self): @@ -407,10 +372,9 @@ def test_corrupt_correctness_2(self): if not self.remote: self.skipTest("You must enable PGPROBACKUP_SSH_REMOTE" " for run this test") - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - 
base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -512,17 +476,13 @@ def test_corrupt_correctness_2(self): output_local['uncompressed-bytes'], output_remote['uncompressed-bytes']) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_color_with_no_terminal(self): """backup.control contains invalid option""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums'], pg_options={'autovacuum': 'off'}) @@ -548,5 +508,38 @@ def test_color_with_no_terminal(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) + # @unittest.skip("skip") + def test_tablespace_print_issue_431(self): + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + initdb_params=['--data-checksums']) + + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + self.set_archiving(backup_dir, 'node', node) + node.slow_start() + + # Create tablespace + tblspc_path = os.path.join(node.base_dir, "tblspc") + os.makedirs(tblspc_path) + with node.connect("postgres") as con: + con.connection.autocommit = True + con.execute("CREATE TABLESPACE tblspc LOCATION '%s'" % tblspc_path) + con.connection.autocommit = False + con.execute("CREATE TABLE test (id int) TABLESPACE tblspc") + con.execute("INSERT INTO test VALUES (1)") + con.commit() + + full_backup_id = self.backup_node(backup_dir, 'node', node) + self.assertIn("OK", self.show_pb(backup_dir,'node', as_text=True)) + # Check that tablespace info exists. JSON + self.assertIn("tablespace_map", self.show_pb(backup_dir, 'node', as_text=True)) + self.assertIn("oid", self.show_pb(backup_dir, 'node', as_text=True)) + self.assertIn("path", self.show_pb(backup_dir, 'node', as_text=True)) + self.assertIn(tblspc_path, self.show_pb(backup_dir, 'node', as_text=True)) + # Check that tablespace info exists. PLAIN + self.assertIn("tablespace_map", self.show_pb(backup_dir, 'node', backup_id=full_backup_id, as_text=True, as_json=False)) + self.assertIn(tblspc_path, self.show_pb(backup_dir, 'node', backup_id=full_backup_id, as_text=True, as_json=False)) + # Check that tablespace info NOT exists if backup id not provided. 
PLAIN + self.assertNotIn("tablespace_map", self.show_pb(backup_dir, 'node', as_text=True, as_json=False)) diff --git a/tests/time_consuming_test.py b/tests/time_consuming_test.py new file mode 100644 index 000000000..c0038c085 --- /dev/null +++ b/tests/time_consuming_test.py @@ -0,0 +1,77 @@ +import os +import unittest +from .helpers.ptrack_helpers import ProbackupTest +import subprocess +from time import sleep + + +class TimeConsumingTests(ProbackupTest, unittest.TestCase): + def test_pbckp150(self): + """ + https://jira.postgrespro.ru/browse/PBCKP-150 + create a node filled with pgbench + create FULL backup followed by PTRACK backup + run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel + """ + # init node + if self.pg_config_version < self.version_to_num('11.0'): + self.skipTest('You need PostgreSQL >= 11 for this test') + if not self.ptrack: + self.skipTest('Skipped because ptrack support is disabled') + + node = self.make_simple_node( + base_dir=os.path.join(self.module_name, self.fname, 'node'), + set_replication=True, + ptrack_enable=self.ptrack, + initdb_params=['--data-checksums'], + pg_options={ + 'max_connections': 100, + 'log_statement': 'none', + 'log_checkpoints': 'on', + 'autovacuum': 'off', + 'ptrack.map_size': 1}) + + if node.major_version >= 13: + self.set_auto_conf(node, {'wal_keep_size': '16000MB'}) + else: + self.set_auto_conf(node, {'wal_keep_segments': '1000'}) + + # init probackup and add an instance + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') + self.init_pb(backup_dir) + self.add_instance(backup_dir, 'node', node) + + # run the node and init ptrack + node.slow_start() + node.safe_psql("postgres", "CREATE EXTENSION ptrack") + # populate it with pgbench + node.pgbench_init(scale=5) + + # FULL backup followed by PTRACK backup + self.backup_node(backup_dir, 'node', node, options=['--stream']) + self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream']) + + # run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel + nBenchDuration = 30 + pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)]) + with open('/tmp/pbckp150vacuum.sql', 'w') as f: + f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n') + pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)]) + + # several PTRACK backups + for i in range(nBenchDuration): + print("[{}] backing up PTRACK diff...".format(i+1)) + self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE']) + sleep(0.1) + # if the activity pgbench has finished, stop backing up + if pgbench.poll() is not None: + break + + pgbench.kill() + pgbenchval.kill() + pgbench.wait() + pgbenchval.wait() + + backups = self.show_pb(backup_dir, 'node') + for b in backups: + self.assertEqual("OK", b['status']) diff --git a/tests/time_stamp.py b/tests/time_stamp_test.py similarity index 85% rename from tests/time_stamp.py rename to tests/time_stamp_test.py index c49d183da..170c62cd4 100644 --- a/tests/time_stamp.py +++ b/tests/time_stamp_test.py @@ -5,22 +5,19 @@ from time import sleep -module_name = 'time_stamp' - class TimeStamp(ProbackupTest, unittest.TestCase): def test_start_time_format(self): """Test backup ID changing after start-time editing in backup.control. 
We should convert local time in UTC format""" # Create simple node - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -58,19 +55,16 @@ def test_start_time_format(self): self.assertNotIn("backup ID in control file", output) node.stop() - # Clean after yourself - self.del_test_dir(module_name, fname) def test_server_date_style(self): """Issue #112""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums'], pg_options={"datestyle": "GERMAN, DMY"}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.start() @@ -78,18 +72,14 @@ def test_server_date_style(self): self.backup_node( backup_dir, 'node', node, options=['--stream', '-j 2']) - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_handling_of_TZ_env_variable(self): """Issue #284""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir="{0}/{1}/node".format(module_name, fname), + base_dir="{0}/{1}/node".format(self.module_name, self.fname), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.start() @@ -104,17 +94,13 @@ def test_handling_of_TZ_env_variable(self): self.assertNotIn("backup ID in control file", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") # @unittest.expectedFailure def test_dst_timezone_handling(self): """for manual testing""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -180,16 +166,12 @@ def test_dst_timezone_handling(self): stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_dst_timezone_handling_backward_compatibilty(self): """for manual testing""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -252,6 +234,3 @@ def test_dst_timezone_handling_backward_compatibilty(self): ['sudo', 
'timedatectl', 'set-timezone', 'US/Moscow'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() - - # Clean after yourself - self.del_test_dir(module_name, fname) diff --git a/tests/validate.py b/tests/validate_test.py similarity index 92% rename from tests/validate.py rename to tests/validate_test.py index 0b04d92fe..4ff44941f 100644 --- a/tests/validate.py +++ b/tests/validate_test.py @@ -2,15 +2,13 @@ import unittest from .helpers.ptrack_helpers import ProbackupTest, ProbackupException from datetime import datetime, timedelta +from pathlib import Path import subprocess from sys import exit import time import hashlib -module_name = 'validate' - - class ValidateTest(ProbackupTest, unittest.TestCase): # @unittest.skip("skip") @@ -19,12 +17,11 @@ def test_basic_validate_nullified_heap_page_backup(self): """ make node with nullified heap block """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -58,7 +55,7 @@ def test_basic_validate_nullified_heap_page_backup(self): with open(log_file_path) as f: log_content = f.read() self.assertIn( - 'File: "{0}" blknum 1, empty page'.format(file), + 'File: "{0}" blknum 1, empty page'.format(Path(file).as_posix()), log_content, 'Failed to detect nullified block') @@ -70,9 +67,6 @@ def test_basic_validate_nullified_heap_page_backup(self): pgdata_restored = self.pgdata_content(node.data_dir) self.compare_pgdata(pgdata, pgdata_restored) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_validate_wal_unreal_values(self): @@ -80,12 +74,11 @@ def test_validate_wal_unreal_values(self): make node with archiving, make archive backup validate to both real and unreal values """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -212,9 +205,6 @@ def test_validate_wal_unreal_values(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_basic_validate_corrupted_intermediate_backup(self): """ @@ -223,12 +213,11 @@ def test_basic_validate_corrupted_intermediate_backup(self): run validate on PAGE1, expect PAGE1 to gain status CORRUPT and PAGE2 gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 
'node', node) self.set_archiving(backup_dir, 'node', node) @@ -297,9 +286,6 @@ def test_basic_validate_corrupted_intermediate_backup(self): self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_intermediate_backups(self): """ @@ -308,12 +294,11 @@ def test_validate_corrupted_intermediate_backups(self): expect FULL and PAGE1 to gain status CORRUPT and PAGE2 gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -418,9 +403,6 @@ def test_validate_corrupted_intermediate_backups(self): self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_specific_error_intermediate_backups(self): """ @@ -430,12 +412,11 @@ def test_validate_specific_error_intermediate_backups(self): purpose of this test is to be sure that not only CORRUPT backup descendants can be orphanized """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -506,9 +487,6 @@ def test_validate_specific_error_intermediate_backups(self): self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_error_intermediate_backups(self): """ @@ -518,12 +496,11 @@ def test_validate_error_intermediate_backups(self): purpose of this test is to be sure that not only CORRUPT backup descendants can be orphanized """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -590,9 +567,6 @@ def test_validate_error_intermediate_backups(self): self.show_pb(backup_dir, 'node', backup_id_3)['status'], 'Backup STATUS should be "ORPHAN"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_intermediate_backups_1(self): """ @@ -601,12 +575,11 @@ def test_validate_corrupted_intermediate_backups_1(self): expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 to gain status ORPHAN """ - fname = self.id().split('.')[3] node = 
self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -787,9 +760,6 @@ def test_validate_corrupted_intermediate_backups_1(self): 'OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_specific_target_corrupted_intermediate_backups(self): """ @@ -798,12 +768,11 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): expect PAGE1 to gain status CORRUPT, PAGE2, PAGE3, PAGE4 and PAGE5 to gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -979,9 +948,6 @@ def test_validate_specific_target_corrupted_intermediate_backups(self): self.assertEqual('ORPHAN', self.show_pb(backup_dir, 'node', backup_id_7)['status'], 'Backup STATUS should be "ORPHAN"') self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_8)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_several_corrupt_backups(self): """ @@ -990,12 +956,11 @@ def test_validate_instance_with_several_corrupt_backups(self): expect FULL1 to gain status CORRUPT, PAGE1_1 to gain status ORPHAN FULL2 to gain status CORRUPT, PAGE2_1 to gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1080,25 +1045,18 @@ def test_validate_instance_with_several_corrupt_backups(self): 'OK', self.show_pb(backup_dir, 'node', backup_id_6)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_several_corrupt_backups_interrupt(self): """ check that interrupt during validation is handled correctly """ - if not self.gdb: - self.skipTest( - "Specify PGPROBACKUP_GDB and build without " - "optimizations for run this test" - ) - fname = self.id().split('.')[3] + self._check_gdb_flag_or_skip_test() + node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, 
self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1182,9 +1140,6 @@ def test_validate_instance_with_several_corrupt_backups_interrupt(self): self.assertNotIn( 'Interrupted while locking backup', log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_corrupted_page(self): """ @@ -1192,12 +1147,11 @@ def test_validate_instance_with_corrupted_page(self): corrupt file in PAGE1 backup and run validate on instance, expect PAGE1 to gain status CORRUPT, PAGE2 to gain status ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1329,20 +1283,16 @@ def test_validate_instance_with_corrupted_page(self): 'OK', self.show_pb(backup_dir, 'node', backup_id_5)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_corrupted_full_and_try_restore(self): """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, corrupt file in FULL backup and run validate on instance, expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN, try to restore backup with --no-validation option""" - fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'), + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1425,19 +1375,15 @@ def test_validate_instance_with_corrupted_full_and_try_restore(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(self.output), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_instance_with_corrupted_full(self): """make archive node, take FULL, PAGE1, PAGE2, FULL2, PAGE3 backups, corrupt file in FULL backup and run validate on instance, expect FULL to gain status CORRUPT, PAGE1 and PAGE2 to gain status ORPHAN""" - fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'), + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1519,18 +1465,14 @@ def test_validate_instance_with_corrupted_full(self): self.assertEqual('OK', self.show_pb(backup_dir, 'node', backup_id_4)['status'], 'Backup STATUS should be "OK"') self.assertEqual('OK', self.show_pb(backup_dir, 'node', 
backup_id_5)['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupt_wal_1(self): """make archive node, take FULL1, PAGE1,PAGE2,FULL2,PAGE3,PAGE4 backups, corrupt all wal files, run validate, expect errors""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1581,17 +1523,13 @@ def test_validate_corrupt_wal_1(self): self.show_pb(backup_dir, 'node', backup_id_2)['status'], 'Backup STATUS should be "CORRUPT"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupt_wal_2(self): """make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors""" - fname = self.id().split('.')[3] - node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node'), + node = self.make_simple_node(base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1647,9 +1585,6 @@ def test_validate_corrupt_wal_2(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "CORRUPT"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_wal_lost_segment_1(self): """make archive node, make archive full backup, @@ -1657,12 +1592,11 @@ def test_validate_wal_lost_segment_1(self): run validate, expecting error because of missing wal segment make sure that backup status is 'CORRUPT' """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1724,21 +1658,17 @@ def test_validate_wal_lost_segment_1(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupt_wal_between_backups(self): """ make archive node, make full backup, corrupt all wal files, run validate to real xid, expect errors """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) 
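
Another pattern dropped throughout this diff is the trailing "# Clean after yourself / self.del_test_dir(module_name, fname)" block at the end of each test. Removing it is safe only if cleanup now happens centrally, most likely in the ProbackupTest base class; note that the old calls never ran anyway once an assertion earlier in the test had failed. Below is a self-contained sketch of that idea, with the directory layout and the PGPROBACKUP_TMP_DIR variable being hypothetical stand-ins:

    import os
    import shutil
    import unittest

    class CleanupSketch(unittest.TestCase):
        # Hypothetical default; the real suite builds the path from
        # self.tmp_path, self.module_name and self.fname.
        tmp_path = os.environ.get('PGPROBACKUP_TMP_DIR', '/tmp/pg_probackup_tests')

        def setUp(self):
            test_dir = os.path.join(self.tmp_path, *self.id().split('.')[-2:])
            os.makedirs(test_dir, exist_ok=True)
            # Registered once per test; runs whether the test passes or
            # fails, which the scattered del_test_dir() calls did not.
            self.addCleanup(shutil.rmtree, test_dir, ignore_errors=True)
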
self.set_archiving(backup_dir, 'node', node) @@ -1817,22 +1747,18 @@ def test_validate_corrupt_wal_between_backups(self): self.show_pb(backup_dir, 'node')[1]['status'], 'Backup STATUS should be "OK"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pgpro702_688(self): """ make node without archiving, make stream backup, get Recovery Time, validate to Recovery Time """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() @@ -1858,22 +1784,18 @@ def test_pgpro702_688(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_pgpro688(self): """ make node with archiving, make backup, get Recovery Time, validate to Recovery Time. Waiting PGPRO-688. RESOLVED """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -1887,9 +1809,6 @@ def test_pgpro688(self): backup_dir, 'node', options=["--time={0}".format(recovery_time), "-j", "4"]) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") # @unittest.expectedFailure def test_pgpro561(self): @@ -1897,13 +1816,12 @@ def test_pgpro561(self): make node with archiving, make stream backup, restore it to node1, check that archiving is not successful on node1 """ - fname = self.id().split('.')[3] node1 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node1'), + base_dir=os.path.join(self.module_name, self.fname, 'node1'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) self.set_archiving(backup_dir, 'node1', node1) @@ -1913,7 +1831,7 @@ def test_pgpro561(self): backup_dir, 'node1', node1, options=["--stream"]) node2 = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node2')) + base_dir=os.path.join(self.module_name, self.fname, 'node2')) node2.cleanup() node1.psql( @@ -1995,9 +1913,6 @@ def test_pgpro561(self): self.assertFalse( 'pg_probackup archive-push completed successfully' in log_content) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_full(self): """ @@ -2008,15 +1923,14 @@ def test_validate_corrupted_full(self): remove corruption and run valudate again, check that second full backup and his page backups are OK """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, 
fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums'], pg_options={ 'checkpoint_timeout': '30'}) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2116,9 +2030,6 @@ def test_validate_corrupted_full(self): self.show_pb(backup_dir, 'node')[6]['status'] == 'ERROR') self.assertTrue(self.show_pb(backup_dir, 'node')[7]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_full_1(self): """ @@ -2132,13 +2043,12 @@ def test_validate_corrupted_full_1(self): second page should be CORRUPT third page should be ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2218,9 +2128,6 @@ def test_validate_corrupted_full_1(self): self.assertTrue(self.show_pb(backup_dir, 'node')[5]['status'] == 'CORRUPT') self.assertTrue(self.show_pb(backup_dir, 'node')[6]['status'] == 'ORPHAN') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_full_2(self): """ @@ -2243,13 +2150,12 @@ def test_validate_corrupted_full_2(self): remove corruption from PAGE2_2 and run validate on PAGE2_4 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2580,9 +2486,6 @@ def test_validate_corrupted_full_2(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_corrupted_full_missing(self): """ @@ -2595,13 +2498,12 @@ def test_validate_corrupted_full_missing(self): second full backup and his firts page backups are OK, third page should be ORPHAN """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2813,18 +2715,14 @@ def test_validate_corrupted_full_missing(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] 
== 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - def test_file_size_corruption_no_validate(self): - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), # initdb_params=['--data-checksums'], ) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2875,9 +2773,6 @@ def test_file_size_corruption_no_validate(self): "ERROR: Backup files restoring failed" in e.message, repr(e.message)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_specific_backup_with_missing_backup(self): """ @@ -2894,13 +2789,12 @@ def test_validate_specific_backup_with_missing_backup(self): PAGE1_1 FULL1 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3018,9 +2912,6 @@ def test_validate_specific_backup_with_missing_backup(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_specific_backup_with_missing_backup_1(self): """ @@ -3037,13 +2928,12 @@ def test_validate_specific_backup_with_missing_backup_1(self): PAGE1_1 FULL1 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3139,9 +3029,6 @@ def test_validate_specific_backup_with_missing_backup_1(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_with_missing_backup_1(self): """ @@ -3158,13 +3045,12 @@ def test_validate_with_missing_backup_1(self): PAGE1_1 FULL1 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3328,9 +3214,6 @@ def 
test_validate_with_missing_backup_1(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validate_with_missing_backup_2(self): """ @@ -3347,13 +3230,12 @@ def test_validate_with_missing_backup_2(self): PAGE1_1 FULL1 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3488,19 +3370,15 @@ def test_validate_with_missing_backup_2(self): self.assertTrue(self.show_pb(backup_dir, 'node')[1]['status'] == 'OK') self.assertTrue(self.show_pb(backup_dir, 'node')[0]['status'] == 'OK') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_corrupt_pg_control_via_resetxlog(self): """ PGPRO-2096 """ - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3558,16 +3436,14 @@ def test_corrupt_pg_control_via_resetxlog(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_validation_after_backup(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + self._check_gdb_flag_or_skip_test() + + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3596,19 +3472,15 @@ def test_validation_after_backup(self): self.show_pb(backup_dir, 'node', backup_id)['status'], 'Backup STATUS should be "ERROR"') - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_validate_corrupt_tablespace_map(self): """ Check that corruption in tablespace_map is detected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3651,9 +3523,6 @@ def test_validate_corrupt_tablespace_map(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - #TODO fix the test 
@unittest.expectedFailure # @unittest.skip("skip") @@ -3661,10 +3530,9 @@ def test_validate_target_lsn(self): """ Check validation to specific LSN """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3683,7 +3551,7 @@ def test_validate_target_lsn(self): "from generate_series(0,10000) i") node_restored = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node_restored')) + base_dir=os.path.join(self.module_name, self.fname, 'node_restored')) node_restored.cleanup() self.restore_node(backup_dir, 'node', node_restored) @@ -3709,17 +3577,13 @@ def test_validate_target_lsn(self): '--recovery-target-timeline=2', '--recovery-target-lsn={0}'.format(target_lsn)]) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_partial_validate_empty_and_mangled_database_map(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -3783,16 +3647,12 @@ def test_partial_validate_empty_and_mangled_database_map(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_partial_validate_exclude(self): """""" - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3855,17 +3715,13 @@ def test_partial_validate_exclude(self): self.assertIn( "VERBOSE: Skip file validation due to partial restore", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - @unittest.skip("skip") def test_partial_validate_include(self): """ """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) self.init_pb(backup_dir) @@ -3917,18 +3773,14 @@ def test_partial_validate_include(self): self.assertNotIn( "VERBOSE: Skip file validation due to partial restore", output) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.skip("skip") def test_not_validate_diffenent_pg_version(self): """Do not validate backup, if binary is compiled with different PG version""" - fname = self.id().split('.')[3] node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), initdb_params=['--data-checksums']) - 
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -3971,19 +3823,15 @@ def test_not_validate_diffenent_pg_version(self): "\n Unexpected Error Message: {0}\n CMD: {1}".format( repr(e.message), self.cmd)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_validate_corrupt_page_header_map(self): """ Check that corruption in page_header_map is detected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -4007,46 +3855,34 @@ def test_validate_corrupt_page_header_map(self): f.seek(42) f.write(b"blah") f.flush() - f.close - try: + with self.assertRaises(ProbackupException) as cm: self.validate_pb(backup_dir, 'node', backup_id=backup_id) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: An error occured during metadata decompression' in e.message and - 'data error' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn("Backup {0} is corrupt".format(backup_id), e.message) + e = cm.exception + self.assertRegex( + cm.exception.message, + r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) - try: + self.assertIn("Backup {0} is corrupt".format(backup_id), e.message) + + with self.assertRaises(ProbackupException) as cm: self.validate_pb(backup_dir) - self.assertEqual( - 1, 0, - "Expecting Error because page_header is corrupted.\n " - "Output: {0} \n CMD: {1}".format( - self.output, self.cmd)) - except ProbackupException as e: - self.assertTrue( - 'WARNING: An error occured during metadata decompression' in e.message and - 'data error' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) - self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) - self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) + e = cm.exception + self.assertRegex( + e.message, + r'WARNING: An error occured during metadata decompression for file "[\w/]+": (data|buffer) error', + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) - self.assertIn("WARNING: Some backups are not valid", e.message) + self.assertIn("INFO: Backup {0} data files are valid".format(ok_1), e.message) + self.assertIn("WARNING: Backup {0} data files are corrupted".format(backup_id), e.message) + self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) + self.assertIn("WARNING: Some backups are not valid", e.message) # @unittest.expectedFailure # @unittest.skip("skip") @@ 
-4054,10 +3890,9 @@ def test_validate_truncated_page_header_map(self): """ Check that corruption in page_header_map is detected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -4108,19 +3943,15 @@ def test_validate_truncated_page_header_map(self): self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) self.assertIn("WARNING: Some backups are not valid", e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_validate_missing_page_header_map(self): """ Check that corruption in page_header_map is detected """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -4168,19 +3999,15 @@ def test_validate_missing_page_header_map(self): self.assertIn("INFO: Backup {0} data files are valid".format(ok_2), e.message) self.assertIn("WARNING: Some backups are not valid", e.message) - # Clean after yourself - self.del_test_dir(module_name, fname) - # @unittest.expectedFailure # @unittest.skip("skip") def test_no_validate_tablespace_map(self): """ Check that --no-validate is propagated to tablespace_map """ - fname = self.id().split('.')[3] - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup') node = self.make_simple_node( - base_dir=os.path.join(module_name, fname, 'node'), + base_dir=os.path.join(self.module_name, self.fname, 'node'), set_replication=True, initdb_params=['--data-checksums']) @@ -4233,9 +4060,6 @@ def test_no_validate_tablespace_map(self): tblspace_new, "Symlink '{0}' do not points to '{1}'".format(tablespace_link, tblspace_new)) - # Clean after yourself - self.del_test_dir(module_name, fname) - # validate empty backup list # page from future during validate # page from future during backup @@ -4247,4 +4071,4 @@ def test_no_validate_tablespace_map(self): # 715 MAXALIGN(header.compressed_size), in); # 716 if (read_len != MAXALIGN(header.compressed_size)) # -> 717 elog(ERROR, "cannot read block %u of \"%s\" read %lu of %d", -# 718 blknum, file->path, read_len, header.compressed_size); \ No newline at end of file +# 718 blknum, file->path, read_len, header.compressed_size); diff --git a/travis/Dockerfile.in b/travis/Dockerfile.in deleted file mode 100644 index a67663d3b..000000000 --- a/travis/Dockerfile.in +++ /dev/null @@ -1,30 +0,0 @@ -FROM ololobus/postgres-dev:stretch - -USER root -RUN apt-get update -RUN apt-get -yq install python3 python3-pip - -# RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py -# RUN python2 get-pip.py -RUN python3 -m pip install virtualenv - -# Environment -ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH} -ENV PTRACK_PATCH_PG_BRANCH=${PTRACK_PATCH_PG_BRANCH} -ENV PGPROBACKUP_GDB=${PGPROBACKUP_GDB} -ENV LANG=C.UTF-8 PGHOME=/pg/testdir/pgbin - -# Make directories 
-RUN mkdir -p /pg/testdir - -COPY run_tests.sh /run.sh -RUN chmod 755 /run.sh - -COPY . /pg/testdir -WORKDIR /pg/testdir - -# Grant privileges -RUN chown -R postgres:postgres /pg/testdir - -USER postgres -ENTRYPOINT MODE=${MODE} /run.sh diff --git a/travis/backup_restore.sh b/travis/backup_restore.sh deleted file mode 100644 index b3c9df1ed..000000000 --- a/travis/backup_restore.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/sh -ex - -# vars -export PGVERSION=9.5.4 -export PATH=$PATH:/usr/pgsql-9.5/bin -export PGUSER=pgbench -export PGDATABASE=pgbench -export PGDATA=/var/lib/pgsql/9.5/data -export BACKUP_PATH=/backups -export ARCLOG_PATH=$BACKUP_PATH/backup/pg_xlog -export PGDATA2=/var/lib/pgsql/9.5/data2 -export PGBENCH_SCALE=100 -export PGBENCH_TIME=60 - -# prepare directory -cp -a /tests /build -pushd /build - -# download postgresql -yum install -y wget -wget -k https://ftp.postgresql.org/pub/source/v$PGVERSION/postgresql-$PGVERSION.tar.gz -O postgresql.tar.gz -tar xf postgresql.tar.gz - -# install pg_probackup -yum install -y https://download.postgresql.org/pub/repos/yum/9.5/redhat/rhel-7-x86_64/pgdg-centos95-9.5-2.noarch.rpm -yum install -y postgresql95-devel make gcc readline-devel openssl-devel pam-devel libxml2-devel libxslt-devel -make top_srcdir=postgresql-$PGVERSION -make install top_srcdir=postgresql-$PGVERSION - -# initialize cluster and database -yum install -y postgresql95-server -su postgres -c "/usr/pgsql-9.5/bin/initdb -D $PGDATA -k" -cat <<EOF > $PGDATA/pg_hba.conf -local all all trust -host all all 127.0.0.1/32 trust -local replication pgbench trust -host replication pgbench 127.0.0.1/32 trust -EOF -cat <<EOF > $PGDATA/postgresql.auto.conf -max_wal_senders = 2 -wal_level = logical -wal_log_hints = on -EOF -su postgres -c "/usr/pgsql-9.5/bin/pg_ctl start -w -D $PGDATA" -su postgres -c "createdb -U postgres $PGUSER" -su postgres -c "createuser -U postgres -a -d -E $PGUSER" -pgbench -i -s $PGBENCH_SCALE - -# Count current -COUNT=$(psql -Atc "select count(*) from pgbench_accounts") -pgbench -s $PGBENCH_SCALE -T $PGBENCH_TIME -j 2 -c 10 & - -# create backup -pg_probackup init -pg_probackup backup -b full --disable-ptrack-clear --stream -v -pg_probackup show -sleep $PGBENCH_TIME - -# restore from backup -chown -R postgres:postgres $BACKUP_PATH -su postgres -c "pg_probackup restore -D $PGDATA2" - -# start backup server -su postgres -c "/usr/pgsql-9.5/bin/pg_ctl stop -w -D $PGDATA" -su postgres -c "/usr/pgsql-9.5/bin/pg_ctl start -w -D $PGDATA2" -( psql -Atc "select count(*) from pgbench_accounts" | grep $COUNT ) || (cat $PGDATA2/pg_log/*.log ; exit 1) diff --git a/travis/before-install.sh b/travis/before-install.sh new file mode 100755 index 000000000..376de5e6e --- /dev/null +++ b/travis/before-install.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -xe + +mkdir /pg +chown travis /pg \ No newline at end of file diff --git a/travis/before-script-user.sh b/travis/before-script-user.sh new file mode 100755 index 000000000..d9c07f1e4 --- /dev/null +++ b/travis/before-script-user.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -xe + +ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" +cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys +ssh-keyscan -H localhost >> ~/.ssh/known_hosts diff --git a/travis/before-script.sh b/travis/before-script.sh new file mode 100755 index 000000000..ca59bcf23 --- /dev/null +++ b/travis/before-script.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -xe + +/etc/init.d/ssh start + +# Show pg_config path (just in case) +echo "############### pg_config path:" +which pg_config + +# 
Show pg_config just in case +echo "############### pg_config:" +pg_config + +# Show kernel parameters +echo "############### kernel params:" +cat /proc/sys/kernel/yama/ptrace_scope +sudo sysctl kernel.yama.ptrace_scope=0 +cat /proc/sys/kernel/yama/ptrace_scope diff --git a/travis/docker-compose.yml b/travis/docker-compose.yml deleted file mode 100644 index fc6545567..000000000 --- a/travis/docker-compose.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: "3.7" -services: - tests: - build: - context: . - - cap_add: - - SYS_PTRACE - - security_opt: - - seccomp=unconfined - - # don't work - #sysctls: - # kernel.yama.ptrace_scope: 0 - privileged: true - diff --git a/travis/install.sh b/travis/install.sh new file mode 100755 index 000000000..43ada47b7 --- /dev/null +++ b/travis/install.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +set -xe + +if [ -z ${PG_VERSION+x} ]; then + echo PG_VERSION is not set! + exit 1 +fi + +if [ -z ${PG_BRANCH+x} ]; then + echo PG_BRANCH is not set! + exit 1 +fi + +if [ -z ${PTRACK_PATCH_PG_BRANCH+x} ]; then + PTRACK_PATCH_PG_BRANCH=OFF +fi + +# fix +sudo chown -R travis /home/travis/.ccache + +export PGHOME=/pg + +# Clone Postgres +echo "############### Getting Postgres sources:" +git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 + +# Clone ptrack +if [ "$PTRACK_PATCH_PG_BRANCH" != "OFF" ]; then + git clone https://github.com/postgrespro/ptrack.git -b master --depth=1 postgres/contrib/ptrack + export PG_PROBACKUP_PTRACK=ON +else + export PG_PROBACKUP_PTRACK=OFF +fi + +# Compile and install Postgres +echo "############### Compiling Postgres:" +cd postgres # Go to postgres dir +if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then + git apply -3 contrib/ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff +fi +CC='ccache gcc' CFLAGS="-Og" ./configure --prefix=$PGHOME \ + --cache-file=~/.ccache/configure-cache \ + --enable-debug --enable-cassert --enable-depend \ + --enable-tap-tests --enable-nls +make -s -j$(nproc) install +make -s -j$(nproc) -C contrib/ install + +# Override default Postgres instance +export PATH=$PGHOME/bin:$PATH +export LD_LIBRARY_PATH=$PGHOME/lib +export PG_CONFIG=$(which pg_config) + +if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then + echo "############### Compiling Ptrack:" + make -C contrib/ptrack install +fi + +# Get amcheck if missing +if [ ! -d "contrib/amcheck" ]; then + echo "############### Getting missing amcheck:" + git clone https://github.com/petergeoghegan/amcheck.git --depth=1 contrib/amcheck + make -C contrib/amcheck install +fi + +pip3 install testgres \ No newline at end of file diff --git a/travis/make_dockerfile.sh b/travis/make_dockerfile.sh deleted file mode 100755 index 119125ced..000000000 --- a/travis/make_dockerfile.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env sh - -if [ -z ${PG_VERSION+x} ]; then - echo PG_VERSION is not set! - exit 1 -fi - -if [ -z ${PG_BRANCH+x} ]; then - echo PG_BRANCH is not set! 
- exit 1 -fi - -if [ -z ${MODE+x} ]; then - MODE=basic -fi - -if [ -z ${PTRACK_PATCH_PG_BRANCH+x} ]; then - PTRACK_PATCH_PG_BRANCH=off -fi - -if [ -z ${PGPROBACKUP_GDB+x} ]; then - PGPROBACKUP_GDB=ON -fi - -echo PG_VERSION=${PG_VERSION} -echo PG_BRANCH=${PG_BRANCH} -echo MODE=${MODE} -echo PTRACK_PATCH_PG_BRANCH=${PTRACK_PATCH_PG_BRANCH} -echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} - -sed \ - -e 's/${PG_VERSION}/'${PG_VERSION}/g \ - -e 's/${PG_BRANCH}/'${PG_BRANCH}/g \ - -e 's/${MODE}/'${MODE}/g \ - -e 's/${PTRACK_PATCH_PG_BRANCH}/'${PTRACK_PATCH_PG_BRANCH}/g \ - -e 's/${PGPROBACKUP_GDB}/'${PGPROBACKUP_GDB}/g \ -Dockerfile.in > Dockerfile diff --git a/travis/run_tests.sh b/travis/run_tests.sh deleted file mode 100755 index 44815407e..000000000 --- a/travis/run_tests.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env bash - -# -# Copyright (c) 2019-2020, Postgres Professional -# -set -xe - -sudo su -c 'mkdir /run/sshd' -sudo su -c 'apt-get update -y' -sudo su -c 'apt-get install openssh-client openssh-server -y' -sudo su -c '/etc/init.d/ssh start' - -ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" -cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys -ssh-keyscan -H localhost >> ~/.ssh/known_hosts - -PG_SRC=$PWD/postgres - -# # Here PG_VERSION is provided by postgres:X-alpine docker image -# curl "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" -o postgresql.tar.bz2 -# echo "$PG_SHA256 *postgresql.tar.bz2" | sha256sum -c - - -# mkdir $PG_SRC - -# tar \ -# --extract \ -# --file postgresql.tar.bz2 \ -# --directory $PG_SRC \ -# --strip-components 1 - -# Clone Postgres -echo "############### Getting Postgres sources:" -git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 - -# Clone ptrack -if [ "$PTRACK_PATCH_PG_BRANCH" != "off" ]; then - git clone https://github.com/postgrespro/ptrack.git -b master --depth=1 - export PG_PROBACKUP_PTRACK=on -else - export PG_PROBACKUP_PTRACK=off -fi - - -# Compile and install Postgres -echo "############### Compiling Postgres:" -cd postgres # Go to postgres dir -if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then - git apply -3 ../ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff -fi -CFLAGS="-O0" ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests -make -s -j$(nproc) install -#make -s -j$(nproc) -C 'src/common' install -#make -s -j$(nproc) -C 'src/port' install -#make -s -j$(nproc) -C 'src/interfaces' install -make -s -j$(nproc) -C contrib/ install - -# Override default Postgres instance -export PATH=$PGHOME/bin:$PATH -export LD_LIBRARY_PATH=$PGHOME/lib -export PG_CONFIG=$(which pg_config) - -if [ "$PG_PROBACKUP_PTRACK" = "on" ]; then - echo "############### Compiling Ptrack:" - make USE_PGXS=1 -C ../ptrack install -fi - -# Get amcheck if missing -if [ ! -d "contrib/amcheck" ]; then - echo "############### Getting missing amcheck:" - git clone https://github.com/petergeoghegan/amcheck.git --depth=1 contrib/amcheck - make USE_PGXS=1 -C contrib/amcheck install -fi - -# Get back to testdir -cd .. 
- -# Show pg_config path (just in case) -echo "############### pg_config path:" -which pg_config - -# Show pg_config just in case -echo "############### pg_config:" -pg_config - -# Show kernel parameters -echo "############### kernel params:" -cat /proc/sys/kernel/yama/ptrace_scope -sudo sysctl kernel.yama.ptrace_scope=0 -cat /proc/sys/kernel/yama/ptrace_scope - -# Build and install pg_probackup (using PG_CPPFLAGS and SHLIB_LINK for gcov) -echo "############### Compiling and installing pg_probackup:" -# make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" top_srcdir=$CUSTOM_PG_SRC install -make USE_PGXS=1 top_srcdir=$PG_SRC install - -# Setup python environment -echo "############### Setting up python env:" -python3 -m virtualenv pyenv -source pyenv/bin/activate -pip3 install testgres - -echo "############### Testing:" -if [ "$MODE" = "basic" ]; then - export PG_PROBACKUP_TEST_BASIC=ON - python3 -m unittest -v tests - python3 -m unittest -v tests.init -else - python3 -m unittest -v tests.$MODE -fi - -# Generate *.gcov files -# gcov src/*.c src/*.h - -# Send coverage stats to Codecov -# bash <(curl -s https://codecov.io/bash) diff --git a/travis/script.sh b/travis/script.sh new file mode 100755 index 000000000..31ef09726 --- /dev/null +++ b/travis/script.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -xe + +export PGHOME=/pg +export PG_SRC=$PWD/postgres +export PATH=$PGHOME/bin:$PATH +export LD_LIBRARY_PATH=$PGHOME/lib +export PG_CONFIG=$(which pg_config) + +# Build and install pg_probackup (using PG_CPPFLAGS and SHLIB_LINK for gcov) +echo "############### Compiling and installing pg_probackup:" +# make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" top_srcdir=$CUSTOM_PG_SRC install +make USE_PGXS=1 top_srcdir=$PG_SRC install + +if [ -z ${MODE+x} ]; then + MODE=basic +fi + +if [ -z ${PGPROBACKUP_GDB+x} ]; then + PGPROBACKUP_GDB=ON +fi + +echo "############### Testing:" +echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA} +echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION} +echo PGPROBACKUPBIN_OLD=${PGPROBACKUPBIN_OLD} +echo PGPROBACKUPBIN=${PGPROBACKUPBIN} +echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE} +echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB} +echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK} + +if [ "$MODE" = "basic" ]; then + export PG_PROBACKUP_TEST_BASIC=ON + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} + python3 -m unittest -v tests + python3 -m unittest -v tests.init_test +else + echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC} + python3 -m unittest -v tests.$MODE +fi
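Note on the new CI layout: the Docker-based test run (Dockerfile.in, docker-compose.yml, make_dockerfile.sh, run_tests.sh) is replaced by plain shell stages under travis/. The sketch below shows one way to replay those stages by hand on an Ubuntu-like host; it is an illustration, not part of the patch. The branch names and MODE value are assumptions (use whatever you are actually testing), passwordless sudo is assumed, and a couple of steps hard-code the "travis" user (before-install.sh chowns /pg to it, install.sh chowns its ccache directory), so adjust those for a local account.

# Run from the root of the pg_probackup source tree; install.sh clones Postgres into ./postgres here.
# Assumed values; install.sh exits unless PG_VERSION and PG_BRANCH are set.
export PG_VERSION=14
export PG_BRANCH=REL_14_STABLE
export PTRACK_PATCH_PG_BRANCH=REL_14_STABLE   # or leave unset to skip the ptrack patch
export MODE=validate                          # test module to run; script.sh defaults to "basic"

sudo travis/before-install.sh    # creates /pg and hands it to the CI user
travis/install.sh                # builds Postgres (+ ptrack, amcheck) into /pg and pip-installs testgres
sudo travis/before-script.sh     # starts sshd and relaxes kernel.yama.ptrace_scope for gdb-based tests
travis/before-script-user.sh     # sets up passwordless ssh to localhost for remote-mode tests
travis/script.sh                 # builds pg_probackup via PGXS and runs python3 -m unittest tests.$MODE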