diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index f3dae7101..000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,42 +0,0 @@ -restore_registry: &RESTORE_REGISTRY - restore_cache: - key: registry -save_registry: &SAVE_REGISTRY - save_cache: - key: registry-{{ .BuildNum }} - paths: - - /usr/local/cargo/registry/index -deps_key: &DEPS_KEY - key: deps-{{ checksum "~/rust-version" }}-{{ checksum "Cargo.lock" }} -restore_deps: &RESTORE_DEPS - restore_cache: - <<: *DEPS_KEY -save_deps: &SAVE_DEPS - save_cache: - <<: *DEPS_KEY - paths: - - target - - /usr/local/cargo/registry/cache - -version: 2 -jobs: - build: - docker: - - image: rust:1.41.0 - environment: - RUSTFLAGS: -D warnings - - image: sfackler/rust-postgres-test:6 - steps: - - checkout - - run: rustup component add rustfmt clippy - - *RESTORE_REGISTRY - - run: cargo generate-lockfile - - *SAVE_REGISTRY - - run: rustc --version > ~/rust-version - - *RESTORE_DEPS - - run: cargo fmt --all -- --check - - run: cargo clippy --all --all-targets --all-features - - run: cargo test --all - - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features - - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features - - *SAVE_DEPS diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..1332f8eb5 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,8 @@ +version: 2 +updates: +- package-ecosystem: cargo + directory: "/" + schedule: + interval: daily + time: "13:00" + open-pull-requests-limit: 10 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..3426d624b --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,108 @@ +name: CI + +on: + pull_request: + branches: + - master + push: + branches: + - master + +env: + RUSTFLAGS: -Dwarnings + RUST_BACKTRACE: 1 + +jobs: + rustfmt: + name: rustfmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: 
sfackler/actions/rustup@master + - uses: sfackler/actions/rustfmt@master + + clippy: + name: clippy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: sfackler/actions/rustup@master + - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT + id: rust-version + - uses: actions/cache@v3 + with: + path: ~/.cargo/registry/index + key: index-${{ runner.os }}-${{ github.run_number }} + restore-keys: | + index-${{ runner.os }}- + - run: cargo generate-lockfile + - uses: actions/cache@v3 + with: + path: ~/.cargo/registry/cache + key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} + - run: cargo fetch + - uses: actions/cache@v3 + with: + path: target + key: clippy-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y + - run: cargo clippy --all --all-targets + + check-wasm32: + name: check-wasm32 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: sfackler/actions/rustup@master + - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT + id: rust-version + - run: rustup target add wasm32-unknown-unknown + - uses: actions/cache@v3 + with: + path: ~/.cargo/registry/index + key: index-${{ runner.os }}-${{ github.run_number }} + restore-keys: | + index-${{ runner.os }}- + - run: cargo generate-lockfile + - uses: actions/cache@v3 + with: + path: ~/.cargo/registry/cache + key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} + - run: cargo fetch + - uses: actions/cache@v3 + with: + path: target + key: check-wasm32-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} + - run: cargo check --target wasm32-unknown-unknown --manifest-path tokio-postgres/Cargo.toml --no-default-features --features js + env: + RUSTFLAGS: --cfg getrandom_backend="wasm_js" + + test: + name: test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: 
docker compose up -d + - uses: sfackler/actions/rustup@master + with: + version: 1.81.0 + - run: echo "version=$(rustc --version)" >> $GITHUB_OUTPUT + id: rust-version + - uses: actions/cache@v3 + with: + path: ~/.cargo/registry/index + key: index-${{ runner.os }}-${{ github.run_number }} + restore-keys: | + index-${{ runner.os }}- + - run: cargo generate-lockfile + - uses: actions/cache@v3 + with: + path: ~/.cargo/registry/cache + key: registry-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }} + - run: cargo fetch + - uses: actions/cache@v3 + with: + path: target + key: test-target-${{ runner.os }}-${{ steps.rust-version.outputs.version }}-${{ hashFiles('Cargo.lock') }}y + - run: cargo test --all + - run: cargo test --manifest-path tokio-postgres/Cargo.toml --no-default-features + - run: cargo test --manifest-path tokio-postgres/Cargo.toml --all-features diff --git a/Cargo.toml b/Cargo.toml index 4752836a7..16e3739dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "postgres-types", "tokio-postgres", ] +resolver = "2" [profile.release] debug = 2 diff --git a/LICENSE b/LICENSE deleted file mode 100644 index c7e577c00..000000000 --- a/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013-2017 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 000000000..71803aea1 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Steven Fackler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/README.md b/README.md index cbe7182c6..b81a6716f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ # Rust-Postgres -[![CircleCI](https://circleci.com/gh/sfackler/rust-postgres.svg?style=shield)](https://circleci.com/gh/sfackler/rust-postgres) PostgreSQL support for Rust. @@ -32,3 +31,16 @@ TLS support for postgres and tokio-postgres via native-tls. [Documentation](https://docs.rs/postgres-openssl) TLS support for postgres and tokio-postgres via openssl. + +# Running test suite + +The test suite requires postgres to be running in the correct configuration. The easiest way to do this is with docker: + +1. Install `docker` and `docker-compose`. + 1. On ubuntu: `sudo apt install docker.io docker-compose`. +1. Make sure your user has permissions for docker. + 1. On ubuntu: ``sudo usermod -aG docker $USER`` +1. Change to top-level directory of `rust-postgres` repo. +1. Run `docker-compose up -d`. +1. Run `cargo test`. +1. Run `docker-compose stop`. diff --git a/THIRD_PARTY b/THIRD_PARTY index 80336ea0f..05e5ac435 100644 --- a/THIRD_PARTY +++ b/THIRD_PARTY @@ -27,33 +27,3 @@ BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
- -------------------------------------------------------------------------------- - -* src/url.rs has been copied from Rust - -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index 8ff4d58be..bbe6b789c 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Steven Fackler "] [dependencies] -phf_codegen = "0.8" +phf_codegen = "0.11" regex = "1.0" marksman_escape = "0.1" linked-hash-map = "0.5" diff --git a/codegen/src/errcodes.txt b/codegen/src/errcodes.txt index 867e98b69..62418a051 100644 --- a/codegen/src/errcodes.txt +++ b/codegen/src/errcodes.txt @@ -2,7 +2,7 @@ # errcodes.txt # PostgreSQL error codes # -# Copyright (c) 2003-2019, PostgreSQL Global Development Group +# Copyright (c) 2003-2022, PostgreSQL Global Development Group # # This list serves as the basis for generating source files containing error # codes. 
It is kept in a common format to make sure all these source files have @@ -207,6 +207,7 @@ Section: Class 22 - Data Exception 2200S E ERRCODE_INVALID_XML_COMMENT invalid_xml_comment 2200T E ERRCODE_INVALID_XML_PROCESSING_INSTRUCTION invalid_xml_processing_instruction 22030 E ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE duplicate_json_object_key_value +22031 E ERRCODE_INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION invalid_argument_for_sql_json_datetime_function 22032 E ERRCODE_INVALID_JSON_TEXT invalid_json_text 22033 E ERRCODE_INVALID_SQL_JSON_SUBSCRIPT invalid_sql_json_subscript 22034 E ERRCODE_MORE_THAN_ONE_SQL_JSON_ITEM more_than_one_sql_json_item @@ -221,6 +222,7 @@ Section: Class 22 - Data Exception 2203D E ERRCODE_TOO_MANY_JSON_ARRAY_ELEMENTS too_many_json_array_elements 2203E E ERRCODE_TOO_MANY_JSON_OBJECT_MEMBERS too_many_json_object_members 2203F E ERRCODE_SQL_JSON_SCALAR_REQUIRED sql_json_scalar_required +2203G E ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE sql_json_item_cannot_be_cast_to_target_type Section: Class 23 - Integrity Constraint Violation @@ -427,6 +429,7 @@ Section: Class 57 - Operator Intervention 57P02 E ERRCODE_CRASH_SHUTDOWN crash_shutdown 57P03 E ERRCODE_CANNOT_CONNECT_NOW cannot_connect_now 57P04 E ERRCODE_DATABASE_DROPPED database_dropped +57P05 E ERRCODE_IDLE_SESSION_TIMEOUT idle_session_timeout Section: Class 58 - System Error (errors external to PostgreSQL itself) diff --git a/codegen/src/pg_range.dat b/codegen/src/pg_range.dat index dd9baa267..74d6de0cf 100644 --- a/codegen/src/pg_range.dat +++ b/codegen/src/pg_range.dat @@ -3,7 +3,7 @@ # pg_range.dat # Initial contents of the pg_range system catalog. 
# -# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/include/catalog/pg_range.dat @@ -12,20 +12,23 @@ [ -{ rngtypid => 'int4range', rngsubtype => 'int4', rngsubopc => 'btree/int4_ops', +{ rngtypid => 'int4range', rngsubtype => 'int4', + rngmultitypid => 'int4multirange', rngsubopc => 'btree/int4_ops', rngcanonical => 'int4range_canonical', rngsubdiff => 'int4range_subdiff' }, { rngtypid => 'numrange', rngsubtype => 'numeric', - rngsubopc => 'btree/numeric_ops', rngcanonical => '-', - rngsubdiff => 'numrange_subdiff' }, + rngmultitypid => 'nummultirange', rngsubopc => 'btree/numeric_ops', + rngcanonical => '-', rngsubdiff => 'numrange_subdiff' }, { rngtypid => 'tsrange', rngsubtype => 'timestamp', - rngsubopc => 'btree/timestamp_ops', rngcanonical => '-', - rngsubdiff => 'tsrange_subdiff' }, + rngmultitypid => 'tsmultirange', rngsubopc => 'btree/timestamp_ops', + rngcanonical => '-', rngsubdiff => 'tsrange_subdiff' }, { rngtypid => 'tstzrange', rngsubtype => 'timestamptz', - rngsubopc => 'btree/timestamptz_ops', rngcanonical => '-', - rngsubdiff => 'tstzrange_subdiff' }, -{ rngtypid => 'daterange', rngsubtype => 'date', rngsubopc => 'btree/date_ops', + rngmultitypid => 'tstzmultirange', rngsubopc => 'btree/timestamptz_ops', + rngcanonical => '-', rngsubdiff => 'tstzrange_subdiff' }, +{ rngtypid => 'daterange', rngsubtype => 'date', + rngmultitypid => 'datemultirange', rngsubopc => 'btree/date_ops', rngcanonical => 'daterange_canonical', rngsubdiff => 'daterange_subdiff' }, -{ rngtypid => 'int8range', rngsubtype => 'int8', rngsubopc => 'btree/int8_ops', +{ rngtypid => 'int8range', rngsubtype => 'int8', + rngmultitypid => 'int8multirange', rngsubopc => 'btree/int8_ops', rngcanonical => 'int8range_canonical', rngsubdiff => 'int8range_subdiff' }, ] diff --git a/codegen/src/pg_type.dat 
b/codegen/src/pg_type.dat index be49e0011..df4587946 100644 --- a/codegen/src/pg_type.dat +++ b/codegen/src/pg_type.dat @@ -3,7 +3,7 @@ # pg_type.dat # Initial contents of the pg_type system catalog. # -# Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group +# Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/include/catalog/pg_type.dat @@ -15,14 +15,10 @@ # For types used in the system catalogs, make sure the values here match # TypInfo[] in bootstrap.c. -# OID symbol macro names for pg_type OIDs are generated by genbki.pl -# according to the following rule, so you don't need to specify them -# here: +# OID symbol macro names for pg_type OIDs are not specified here because +# they are generated by genbki.pl according to the following rule: # foo_bar -> FOO_BAROID # _foo_bar -> FOO_BARARRAYOID -# -# The only oid_symbol entries in this file are for names that don't match -# this rule, and are grandfathered in. # To autogenerate an array type, add 'array_type_oid => 'nnnn' to the element # type, which will instruct genbki.pl to generate a BKI entry for it. 
@@ -46,15 +42,16 @@ typinput => 'byteain', typoutput => 'byteaout', typreceive => 'bytearecv', typsend => 'byteasend', typalign => 'i', typstorage => 'x' }, { oid => '18', array_type_oid => '1002', descr => 'single character', - typname => 'char', typlen => '1', typbyval => 't', typcategory => 'S', + typname => 'char', typlen => '1', typbyval => 't', typcategory => 'Z', typinput => 'charin', typoutput => 'charout', typreceive => 'charrecv', typsend => 'charsend', typalign => 'c' }, { oid => '19', array_type_oid => '1003', descr => '63-byte type for storing system identifiers', typname => 'name', typlen => 'NAMEDATALEN', typbyval => 'f', - typcategory => 'S', typelem => 'char', typinput => 'namein', - typoutput => 'nameout', typreceive => 'namerecv', typsend => 'namesend', - typalign => 'c', typcollation => 'C' }, + typcategory => 'S', typsubscript => 'raw_array_subscript_handler', + typelem => 'char', typinput => 'namein', typoutput => 'nameout', + typreceive => 'namerecv', typsend => 'namesend', typalign => 'c', + typcollation => 'C' }, { oid => '20', array_type_oid => '1016', descr => '~18 digit integer, 8-byte storage', typname => 'int8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', @@ -68,7 +65,8 @@ { oid => '22', array_type_oid => '1006', descr => 'array of int2, used in system tables', typname => 'int2vector', typlen => '-1', typbyval => 'f', typcategory => 'A', - typelem => 'int2', typinput => 'int2vectorin', typoutput => 'int2vectorout', + typsubscript => 'array_subscript_handler', typelem => 'int2', + typinput => 'int2vectorin', typoutput => 'int2vectorout', typreceive => 'int2vectorrecv', typsend => 'int2vectorsend', typalign => 'i' }, { oid => '23', array_type_oid => '1007', @@ -108,27 +106,28 @@ { oid => '30', array_type_oid => '1013', descr => 'array of oids, used in system tables', typname => 'oidvector', typlen => '-1', typbyval => 'f', typcategory => 'A', - typelem => 'oid', typinput => 'oidvectorin', typoutput => 'oidvectorout', + typsubscript => 
'array_subscript_handler', typelem => 'oid', + typinput => 'oidvectorin', typoutput => 'oidvectorout', typreceive => 'oidvectorrecv', typsend => 'oidvectorsend', typalign => 'i' }, # hand-built rowtype entries for bootstrapped catalogs # NB: OIDs assigned here must match the BKI_ROWTYPE_OID declarations -{ oid => '71', +{ oid => '71', array_type_oid => '210', typname => 'pg_type', typlen => '-1', typbyval => 'f', typtype => 'c', typcategory => 'C', typrelid => 'pg_type', typinput => 'record_in', typoutput => 'record_out', typreceive => 'record_recv', typsend => 'record_send', typalign => 'd', typstorage => 'x' }, -{ oid => '75', +{ oid => '75', array_type_oid => '270', typname => 'pg_attribute', typlen => '-1', typbyval => 'f', typtype => 'c', typcategory => 'C', typrelid => 'pg_attribute', typinput => 'record_in', typoutput => 'record_out', typreceive => 'record_recv', typsend => 'record_send', typalign => 'd', typstorage => 'x' }, -{ oid => '81', +{ oid => '81', array_type_oid => '272', typname => 'pg_proc', typlen => '-1', typbyval => 'f', typtype => 'c', typcategory => 'C', typrelid => 'pg_proc', typinput => 'record_in', typoutput => 'record_out', typreceive => 'record_recv', typsend => 'record_send', typalign => 'd', typstorage => 'x' }, -{ oid => '83', +{ oid => '83', array_type_oid => '273', typname => 'pg_class', typlen => '-1', typbyval => 'f', typtype => 'c', typcategory => 'C', typrelid => 'pg_class', typinput => 'record_in', typoutput => 'record_out', typreceive => 'record_recv', @@ -144,52 +143,53 @@ typname => 'xml', typlen => '-1', typbyval => 'f', typcategory => 'U', typinput => 'xml_in', typoutput => 'xml_out', typreceive => 'xml_recv', typsend => 'xml_send', typalign => 'i', typstorage => 'x' }, -{ oid => '194', oid_symbol => 'PGNODETREEOID', - descr => 'string representing an internal node tree', +{ oid => '194', descr => 'string representing an internal node tree', typname => 'pg_node_tree', typlen => '-1', typbyval => 'f', - typcategory => 'S', 
typinput => 'pg_node_tree_in', + typcategory => 'Z', typinput => 'pg_node_tree_in', typoutput => 'pg_node_tree_out', typreceive => 'pg_node_tree_recv', typsend => 'pg_node_tree_send', typalign => 'i', typstorage => 'x', typcollation => 'default' }, -{ oid => '3361', oid_symbol => 'PGNDISTINCTOID', - descr => 'multivariate ndistinct coefficients', +{ oid => '3361', descr => 'multivariate ndistinct coefficients', typname => 'pg_ndistinct', typlen => '-1', typbyval => 'f', - typcategory => 'S', typinput => 'pg_ndistinct_in', + typcategory => 'Z', typinput => 'pg_ndistinct_in', typoutput => 'pg_ndistinct_out', typreceive => 'pg_ndistinct_recv', typsend => 'pg_ndistinct_send', typalign => 'i', typstorage => 'x', typcollation => 'default' }, -{ oid => '3402', oid_symbol => 'PGDEPENDENCIESOID', - descr => 'multivariate dependencies', +{ oid => '3402', descr => 'multivariate dependencies', typname => 'pg_dependencies', typlen => '-1', typbyval => 'f', - typcategory => 'S', typinput => 'pg_dependencies_in', + typcategory => 'Z', typinput => 'pg_dependencies_in', typoutput => 'pg_dependencies_out', typreceive => 'pg_dependencies_recv', typsend => 'pg_dependencies_send', typalign => 'i', typstorage => 'x', typcollation => 'default' }, -{ oid => '5017', oid_symbol => 'PGMCVLISTOID', - descr => 'multivariate MCV list', - typname => 'pg_mcv_list', typlen => '-1', typbyval => 'f', typcategory => 'S', +{ oid => '5017', descr => 'multivariate MCV list', + typname => 'pg_mcv_list', typlen => '-1', typbyval => 'f', typcategory => 'Z', typinput => 'pg_mcv_list_in', typoutput => 'pg_mcv_list_out', typreceive => 'pg_mcv_list_recv', typsend => 'pg_mcv_list_send', typalign => 'i', typstorage => 'x', typcollation => 'default' }, -{ oid => '32', oid_symbol => 'PGDDLCOMMANDOID', - descr => 'internal type for passing CollectedCommand', +{ oid => '32', descr => 'internal type for passing CollectedCommand', typname => 'pg_ddl_command', typlen => 'SIZEOF_POINTER', typbyval => 't', typtype => 
'p', typcategory => 'P', typinput => 'pg_ddl_command_in', typoutput => 'pg_ddl_command_out', typreceive => 'pg_ddl_command_recv', typsend => 'pg_ddl_command_send', typalign => 'ALIGNOF_POINTER' }, +{ oid => '5069', array_type_oid => '271', descr => 'full transaction id', + typname => 'xid8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', + typcategory => 'U', typinput => 'xid8in', typoutput => 'xid8out', + typreceive => 'xid8recv', typsend => 'xid8send', typalign => 'd' }, # OIDS 600 - 699 { oid => '600', array_type_oid => '1017', descr => 'geometric point \'(x, y)\'', typname => 'point', typlen => '16', typbyval => 'f', typcategory => 'G', - typelem => 'float8', typinput => 'point_in', typoutput => 'point_out', - typreceive => 'point_recv', typsend => 'point_send', typalign => 'd' }, + typsubscript => 'raw_array_subscript_handler', typelem => 'float8', + typinput => 'point_in', typoutput => 'point_out', typreceive => 'point_recv', + typsend => 'point_send', typalign => 'd' }, { oid => '601', array_type_oid => '1018', descr => 'geometric line segment \'(pt1,pt2)\'', typname => 'lseg', typlen => '32', typbyval => 'f', typcategory => 'G', - typelem => 'point', typinput => 'lseg_in', typoutput => 'lseg_out', - typreceive => 'lseg_recv', typsend => 'lseg_send', typalign => 'd' }, + typsubscript => 'raw_array_subscript_handler', typelem => 'point', + typinput => 'lseg_in', typoutput => 'lseg_out', typreceive => 'lseg_recv', + typsend => 'lseg_send', typalign => 'd' }, { oid => '602', array_type_oid => '1019', descr => 'geometric path \'(pt1,...)\'', typname => 'path', typlen => '-1', typbyval => 'f', typcategory => 'G', @@ -198,9 +198,9 @@ { oid => '603', array_type_oid => '1020', descr => 'geometric box \'(lower left,upper right)\'', typname => 'box', typlen => '32', typbyval => 'f', typcategory => 'G', - typdelim => ';', typelem => 'point', typinput => 'box_in', - typoutput => 'box_out', typreceive => 'box_recv', typsend => 'box_send', - typalign => 'd' }, + typdelim 
=> ';', typsubscript => 'raw_array_subscript_handler', + typelem => 'point', typinput => 'box_in', typoutput => 'box_out', + typreceive => 'box_recv', typsend => 'box_send', typalign => 'd' }, { oid => '604', array_type_oid => '1027', descr => 'geometric polygon \'(pt1,...)\'', typname => 'polygon', typlen => '-1', typbyval => 'f', typcategory => 'G', @@ -208,16 +208,17 @@ typsend => 'poly_send', typalign => 'd', typstorage => 'x' }, { oid => '628', array_type_oid => '629', descr => 'geometric line', typname => 'line', typlen => '24', typbyval => 'f', typcategory => 'G', - typelem => 'float8', typinput => 'line_in', typoutput => 'line_out', - typreceive => 'line_recv', typsend => 'line_send', typalign => 'd' }, + typsubscript => 'raw_array_subscript_handler', typelem => 'float8', + typinput => 'line_in', typoutput => 'line_out', typreceive => 'line_recv', + typsend => 'line_send', typalign => 'd' }, # OIDS 700 - 799 { oid => '700', array_type_oid => '1021', descr => 'single-precision floating point number, 4-byte storage', - typname => 'float4', typlen => '4', typbyval => 'FLOAT4PASSBYVAL', - typcategory => 'N', typinput => 'float4in', typoutput => 'float4out', - typreceive => 'float4recv', typsend => 'float4send', typalign => 'i' }, + typname => 'float4', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'float4in', typoutput => 'float4out', typreceive => 'float4recv', + typsend => 'float4send', typalign => 'i' }, { oid => '701', array_type_oid => '1022', descr => 'double-precision floating point number, 8-byte storage', typname => 'float8', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', @@ -233,7 +234,7 @@ typname => 'circle', typlen => '24', typbyval => 'f', typcategory => 'G', typinput => 'circle_in', typoutput => 'circle_out', typreceive => 'circle_recv', typsend => 'circle_send', typalign => 'd' }, -{ oid => '790', oid_symbol => 'CASHOID', array_type_oid => '791', +{ oid => '790', array_type_oid => '791', descr => 'monetary amounts, $d,ddd.cc', 
typname => 'money', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', typcategory => 'N', typinput => 'cash_in', typoutput => 'cash_out', @@ -379,6 +380,11 @@ typname => 'regclass', typlen => '4', typbyval => 't', typcategory => 'N', typinput => 'regclassin', typoutput => 'regclassout', typreceive => 'regclassrecv', typsend => 'regclasssend', typalign => 'i' }, +{ oid => '4191', array_type_oid => '4192', descr => 'registered collation', + typname => 'regcollation', typlen => '4', typbyval => 't', typcategory => 'N', + typinput => 'regcollationin', typoutput => 'regcollationout', + typreceive => 'regcollationrecv', typsend => 'regcollationsend', + typalign => 'i' }, { oid => '2206', array_type_oid => '2211', descr => 'registered type', typname => 'regtype', typlen => '4', typbyval => 't', typcategory => 'N', typinput => 'regtypein', typoutput => 'regtypeout', @@ -400,8 +406,7 @@ typsend => 'uuid_send', typalign => 'c' }, # pg_lsn -{ oid => '3220', oid_symbol => 'LSNOID', array_type_oid => '3221', - descr => 'PostgreSQL LSN datatype', +{ oid => '3220', array_type_oid => '3221', descr => 'PostgreSQL LSN datatype', typname => 'pg_lsn', typlen => '8', typbyval => 'FLOAT8PASSBYVAL', typcategory => 'U', typinput => 'pg_lsn_in', typoutput => 'pg_lsn_out', typreceive => 'pg_lsn_recv', typsend => 'pg_lsn_send', typalign => 'd' }, @@ -438,8 +443,9 @@ # jsonb { oid => '3802', array_type_oid => '3807', descr => 'Binary JSON', typname => 'jsonb', typlen => '-1', typbyval => 'f', typcategory => 'U', - typinput => 'jsonb_in', typoutput => 'jsonb_out', typreceive => 'jsonb_recv', - typsend => 'jsonb_send', typalign => 'i', typstorage => 'x' }, + typsubscript => 'jsonb_subscript_handler', typinput => 'jsonb_in', + typoutput => 'jsonb_out', typreceive => 'jsonb_recv', typsend => 'jsonb_send', + typalign => 'i', typstorage => 'x' }, { oid => '4072', array_type_oid => '4073', descr => 'JSON path', typname => 'jsonpath', typlen => '-1', typbyval => 'f', typcategory => 'U', typinput => 
'jsonpath_in', typoutput => 'jsonpath_out', @@ -451,6 +457,11 @@ typcategory => 'U', typinput => 'txid_snapshot_in', typoutput => 'txid_snapshot_out', typreceive => 'txid_snapshot_recv', typsend => 'txid_snapshot_send', typalign => 'd', typstorage => 'x' }, +{ oid => '5038', array_type_oid => '5039', descr => 'snapshot', + typname => 'pg_snapshot', typlen => '-1', typbyval => 'f', typcategory => 'U', + typinput => 'pg_snapshot_in', typoutput => 'pg_snapshot_out', + typreceive => 'pg_snapshot_recv', typsend => 'pg_snapshot_send', + typalign => 'd', typstorage => 'x' }, # range types { oid => '3904', array_type_oid => '3905', descr => 'range of integers', @@ -486,6 +497,46 @@ typreceive => 'range_recv', typsend => 'range_send', typanalyze => 'range_typanalyze', typalign => 'd', typstorage => 'x' }, +# multirange types +{ oid => '4451', array_type_oid => '6150', descr => 'multirange of integers', + typname => 'int4multirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'i', typstorage => 'x' }, +{ oid => '4532', array_type_oid => '6151', descr => 'multirange of numerics', + typname => 'nummultirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'i', typstorage => 'x' }, +{ oid => '4533', array_type_oid => '6152', + descr => 'multirange of timestamps without time zone', + typname => 'tsmultirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'd', typstorage 
=> 'x' }, +{ oid => '4534', array_type_oid => '6153', + descr => 'multirange of timestamps with time zone', + typname => 'tstzmultirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'd', typstorage => 'x' }, +{ oid => '4535', array_type_oid => '6155', descr => 'multirange of dates', + typname => 'datemultirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'i', typstorage => 'x' }, +{ oid => '4536', array_type_oid => '6157', descr => 'multirange of bigints', + typname => 'int8multirange', typlen => '-1', typbyval => 'f', typtype => 'm', + typcategory => 'R', typinput => 'multirange_in', + typoutput => 'multirange_out', typreceive => 'multirange_recv', + typsend => 'multirange_send', typanalyze => 'multirange_typanalyze', + typalign => 'd', typstorage => 'x' }, + # pseudo-types # types with typtype='p' represent various special cases in the type system. # These cannot be used to define table columns, but are valid as function @@ -503,8 +554,9 @@ # Arrays of records have typcategory P, so they can't be autogenerated. 
{ oid => '2287', typname => '_record', typlen => '-1', typbyval => 'f', typtype => 'p', - typcategory => 'P', typelem => 'record', typinput => 'array_in', - typoutput => 'array_out', typreceive => 'array_recv', typsend => 'array_send', + typcategory => 'P', typsubscript => 'array_subscript_handler', + typelem => 'record', typinput => 'array_in', typoutput => 'array_out', + typreceive => 'array_recv', typsend => 'array_send', typanalyze => 'array_typanalyze', typalign => 'd', typstorage => 'x' }, { oid => '2275', array_type_oid => '1263', descr => 'C-style string', typname => 'cstring', typlen => '-2', typbyval => 'f', typtype => 'p', @@ -528,7 +580,7 @@ typname => 'trigger', typlen => '4', typbyval => 't', typtype => 'p', typcategory => 'P', typinput => 'trigger_in', typoutput => 'trigger_out', typreceive => '-', typsend => '-', typalign => 'i' }, -{ oid => '3838', oid_symbol => 'EVTTRIGGEROID', +{ oid => '3838', descr => 'pseudo-type for the result of an event trigger function', typname => 'event_trigger', typlen => '4', typbyval => 't', typtype => 'p', typcategory => 'P', typinput => 'event_trigger_in', @@ -546,10 +598,6 @@ typtype => 'p', typcategory => 'P', typinput => 'internal_in', typoutput => 'internal_out', typreceive => '-', typsend => '-', typalign => 'ALIGNOF_POINTER' }, -{ oid => '2282', descr => 'obsolete, deprecated pseudo-type', - typname => 'opaque', typlen => '4', typbyval => 't', typtype => 'p', - typcategory => 'P', typinput => 'opaque_in', typoutput => 'opaque_out', - typreceive => '-', typsend => '-', typalign => 'i' }, { oid => '2283', descr => 'pseudo-type representing a polymorphic base type', typname => 'anyelement', typlen => '4', typbyval => 't', typtype => 'p', typcategory => 'P', typinput => 'anyelement_in', @@ -590,9 +638,58 @@ typoutput => 'table_am_handler_out', typreceive => '-', typsend => '-', typalign => 'i' }, { oid => '3831', - descr => 'pseudo-type representing a polymorphic base type that is a range', + descr => 'pseudo-type 
representing a range over a polymorphic base type', typname => 'anyrange', typlen => '-1', typbyval => 'f', typtype => 'p', typcategory => 'P', typinput => 'anyrange_in', typoutput => 'anyrange_out', typreceive => '-', typsend => '-', typalign => 'd', typstorage => 'x' }, - +{ oid => '5077', + descr => 'pseudo-type representing a polymorphic common type', + typname => 'anycompatible', typlen => '4', typbyval => 't', typtype => 'p', + typcategory => 'P', typinput => 'anycompatible_in', + typoutput => 'anycompatible_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '5078', + descr => 'pseudo-type representing an array of polymorphic common type elements', + typname => 'anycompatiblearray', typlen => '-1', typbyval => 'f', + typtype => 'p', typcategory => 'P', typinput => 'anycompatiblearray_in', + typoutput => 'anycompatiblearray_out', + typreceive => 'anycompatiblearray_recv', typsend => 'anycompatiblearray_send', + typalign => 'd', typstorage => 'x' }, +{ oid => '5079', + descr => 'pseudo-type representing a polymorphic common type that is not an array', + typname => 'anycompatiblenonarray', typlen => '4', typbyval => 't', + typtype => 'p', typcategory => 'P', typinput => 'anycompatiblenonarray_in', + typoutput => 'anycompatiblenonarray_out', typreceive => '-', typsend => '-', + typalign => 'i' }, +{ oid => '5080', + descr => 'pseudo-type representing a range over a polymorphic common type', + typname => 'anycompatiblerange', typlen => '-1', typbyval => 'f', + typtype => 'p', typcategory => 'P', typinput => 'anycompatiblerange_in', + typoutput => 'anycompatiblerange_out', typreceive => '-', typsend => '-', + typalign => 'd', typstorage => 'x' }, +{ oid => '4537', + descr => 'pseudo-type representing a polymorphic base type that is a multirange', + typname => 'anymultirange', typlen => '-1', typbyval => 'f', typtype => 'p', + typcategory => 'P', typinput => 'anymultirange_in', + typoutput => 'anymultirange_out', typreceive => '-', typsend => 
'-', + typalign => 'd', typstorage => 'x' }, +{ oid => '4538', + descr => 'pseudo-type representing a multirange over a polymorphic common type', + typname => 'anycompatiblemultirange', typlen => '-1', typbyval => 'f', + typtype => 'p', typcategory => 'P', typinput => 'anycompatiblemultirange_in', + typoutput => 'anycompatiblemultirange_out', typreceive => '-', typsend => '-', + typalign => 'd', typstorage => 'x' }, +{ oid => '4600', descr => 'BRIN bloom summary', + typname => 'pg_brin_bloom_summary', typlen => '-1', typbyval => 'f', + typcategory => 'Z', typinput => 'brin_bloom_summary_in', + typoutput => 'brin_bloom_summary_out', + typreceive => 'brin_bloom_summary_recv', typsend => 'brin_bloom_summary_send', + typalign => 'i', typstorage => 'x', typcollation => 'default' }, +{ oid => '4601', descr => 'BRIN minmax-multi summary', + typname => 'pg_brin_minmax_multi_summary', typlen => '-1', typbyval => 'f', + typcategory => 'Z', typinput => 'brin_minmax_multi_summary_in', + typoutput => 'brin_minmax_multi_summary_out', + typreceive => 'brin_minmax_multi_summary_recv', + typsend => 'brin_minmax_multi_summary_send', typalign => 'i', + typstorage => 'x', typcollation => 'default' }, ] diff --git a/codegen/src/sqlstate.rs b/codegen/src/sqlstate.rs index bb21be34f..d21b92eec 100644 --- a/codegen/src/sqlstate.rs +++ b/codegen/src/sqlstate.rs @@ -1,5 +1,4 @@ use linked_hash_map::LinkedHashMap; -use phf_codegen; use std::fs::File; use std::io::{BufWriter, Write}; @@ -11,7 +10,9 @@ pub fn build() { let codes = parse_codes(); make_type(&mut file); + make_code(&codes, &mut file); make_consts(&codes, &mut file); + make_inner(&codes, &mut file); make_map(&codes, &mut file); } @@ -38,26 +39,51 @@ fn make_type(file: &mut BufWriter) { write!( file, "// Autogenerated file - DO NOT EDIT -use std::borrow::Cow; /// A SQLSTATE error code #[derive(PartialEq, Eq, Clone, Debug)] -pub struct SqlState(Cow<'static, str>); +pub struct SqlState(Inner); impl SqlState {{ /// Creates a 
`SqlState` from its error code. pub fn from_code(s: &str) -> SqlState {{ match SQLSTATE_MAP.get(s) {{ Some(state) => state.clone(), - None => SqlState(Cow::Owned(s.to_string())), + None => SqlState(Inner::Other(s.into())), }} }} +" + ) + .unwrap(); +} +fn make_code(codes: &LinkedHashMap>, file: &mut BufWriter) { + write!( + file, + r#" /// Returns the error code corresponding to the `SqlState`. pub fn code(&self) -> &str {{ - &self.0 + match &self.0 {{"#, + ) + .unwrap(); + + for code in codes.keys() { + write!( + file, + r#" + Inner::E{code} => "{code}","#, + code = code, + ) + .unwrap(); + } + + write!( + file, + r#" + Inner::Other(code) => code, + }} }} -" + "# ) .unwrap(); } @@ -69,7 +95,7 @@ fn make_consts(codes: &LinkedHashMap>, file: &mut BufWriter< file, r#" /// {code} - pub const {name}: SqlState = SqlState(Cow::Borrowed("{code}")); + pub const {name}: SqlState = SqlState(Inner::E{code}); "#, name = name, code = code, @@ -81,6 +107,35 @@ fn make_consts(codes: &LinkedHashMap>, file: &mut BufWriter< write!(file, "}}").unwrap(); } +fn make_inner(codes: &LinkedHashMap>, file: &mut BufWriter) { + write!( + file, + r#" + +#[derive(PartialEq, Eq, Clone, Debug)] +#[allow(clippy::upper_case_acronyms)] +enum Inner {{"#, + ) + .unwrap(); + for code in codes.keys() { + write!( + file, + r#" + E{},"#, + code, + ) + .unwrap(); + } + write!( + file, + r#" + Other(Box), +}} + "#, + ) + .unwrap(); +} + fn make_map(codes: &LinkedHashMap>, file: &mut BufWriter) { let mut builder = phf_codegen::Map::new(); for (code, names) in codes { diff --git a/codegen/src/type_gen.rs b/codegen/src/type_gen.rs index 99a38ce87..fd7a56450 100644 --- a/codegen/src/type_gen.rs +++ b/codegen/src/type_gen.rs @@ -17,6 +17,7 @@ struct Type { variant: String, ident: String, kind: String, + typtype: Option, element: u32, doc: String, } @@ -136,10 +137,7 @@ impl<'a> DatParser<'a> { fn peek(&mut self, target: char) -> bool { self.skip_ws(); - match self.it.peek() { - Some((_, ch)) if *ch == target => 
true, - _ => false, - } + matches!(self.it.peek(), Some((_, ch)) if *ch == target) } fn eof(&mut self) { @@ -188,6 +186,15 @@ fn parse_types() -> BTreeMap { ) }) .collect::>(); + let multi_range_elements = raw_ranges + .iter() + .map(|m| { + ( + oids_by_name[&*m["rngmultitypid"]], + oids_by_name[&*m["rngsubtype"]], + ) + }) + .collect::>(); let range_vector_re = Regex::new("(range|vector)$").unwrap(); let array_re = Regex::new("^_(.*)").unwrap(); @@ -211,8 +218,18 @@ fn parse_types() -> BTreeMap { continue; } + let typtype = raw_type.get("typtype").cloned(); + let element = match &*kind { - "R" => range_elements[&oid], + "R" => match typtype + .as_ref() + .expect("range type must have typtype") + .as_str() + { + "r" => range_elements[&oid], + "m" => multi_range_elements[&oid], + typtype => panic!("invalid range typtype {}", typtype), + }, "A" => oids_by_name[&raw_type["typelem"]], _ => 0, }; @@ -238,6 +255,7 @@ fn parse_types() -> BTreeMap { variant, ident, kind: "A".to_string(), + typtype: None, element: oid, doc, }; @@ -249,6 +267,7 @@ fn parse_types() -> BTreeMap { variant, ident, kind, + typtype, element, doc, }; @@ -319,46 +338,32 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { .unwrap(); for (oid, type_) in types { - write!( - w, - " {} => Some(Inner::{}), -", - oid, type_.variant - ) - .unwrap(); + writeln!(w, " {} => Some(Inner::{}),", oid, type_.variant).unwrap(); } - write!( + writeln!( w, " _ => None, }} }} pub fn oid(&self) -> Oid {{ - match *self {{ -", + match *self {{", ) .unwrap(); for (oid, type_) in types { - write!( - w, - " Inner::{} => {}, -", - type_.variant, oid - ) - .unwrap(); + writeln!(w, " Inner::{} => {},", type_.variant, oid).unwrap(); } - write!( + writeln!( w, " Inner::Other(ref u) => u.oid, }} }} pub fn kind(&self) -> &Kind {{ - match *self {{ -", + match *self {{", ) .unwrap(); @@ -366,50 +371,55 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { let kind = match &*type_.kind { "P" => "Pseudo".to_owned(), "A" => 
format!("Array(Type(Inner::{}))", types[&type_.element].variant), - "R" => format!("Range(Type(Inner::{}))", types[&type_.element].variant), + "R" => match type_ + .typtype + .as_ref() + .expect("range type must have typtype") + .as_str() + { + "r" => format!("Range(Type(Inner::{}))", types[&type_.element].variant), + "m" => format!("Multirange(Type(Inner::{}))", types[&type_.element].variant), + typtype => panic!("invalid range typtype {}", typtype), + }, _ => "Simple".to_owned(), }; - write!( + writeln!( w, " Inner::{} => {{ &Kind::{} - }} -", + }}", type_.variant, kind ) .unwrap(); } - write!( + writeln!( w, r#" Inner::Other(ref u) => &u.kind, }} }} pub fn name(&self) -> &str {{ - match *self {{ -"#, + match *self {{"#, ) .unwrap(); for type_ in types.values() { - write!( + writeln!( w, - r#" Inner::{} => "{}", -"#, + r#" Inner::{} => "{}","#, type_.variant, type_.name ) .unwrap(); } - write!( + writeln!( w, " Inner::Other(ref u) => &u.name, }} }} -}} -" +}}" ) .unwrap(); } @@ -417,12 +427,11 @@ fn make_impl(w: &mut BufWriter, types: &BTreeMap) { fn make_consts(w: &mut BufWriter, types: &BTreeMap) { write!(w, "impl Type {{").unwrap(); for type_ in types.values() { - write!( + writeln!( w, " /// {docs} - pub const {ident}: Type = Type(Inner::{variant}); -", + pub const {ident}: Type = Type(Inner::{variant});", docs = type_.doc, ident = type_.ident, variant = type_.variant diff --git a/docker-compose.yml b/docker-compose.yml index d44fbe866..991df2d01 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,10 @@ version: '2' services: postgres: - image: "sfackler/rust-postgres-test:6" + image: docker.io/postgres:17 ports: - - 5433:5433 + - 5433:5433 + volumes: + - ./docker/sql_setup.sh:/docker-entrypoint-initdb.d/sql_setup.sh + environment: + POSTGRES_PASSWORD: postgres diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 1dd7f3db6..000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM postgres:12 - -COPY 
sql_setup.sh /docker-entrypoint-initdb.d/ diff --git a/docker/sql_setup.sh b/docker/sql_setup.sh index 422dcbda9..0315ac805 100755 --- a/docker/sql_setup.sh +++ b/docker/sql_setup.sh @@ -96,4 +96,5 @@ psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL CREATE ROLE ssl_user LOGIN; CREATE EXTENSION hstore; CREATE EXTENSION citext; + CREATE EXTENSION ltree; EOSQL diff --git a/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.rs b/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.rs new file mode 100644 index 000000000..52d0ba8f6 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.rs @@ -0,0 +1,31 @@ +use postgres_types::{FromSql, ToSql}; + +#[derive(ToSql, Debug)] +#[postgres(allow_mismatch)] +struct ToSqlAllowMismatchStruct { + a: i32, +} + +#[derive(FromSql, Debug)] +#[postgres(allow_mismatch)] +struct FromSqlAllowMismatchStruct { + a: i32, +} + +#[derive(ToSql, Debug)] +#[postgres(allow_mismatch)] +struct ToSqlAllowMismatchTupleStruct(i32, i32); + +#[derive(FromSql, Debug)] +#[postgres(allow_mismatch)] +struct FromSqlAllowMismatchTupleStruct(i32, i32); + +#[derive(FromSql, Debug)] +#[postgres(transparent, allow_mismatch)] +struct TransparentFromSqlAllowMismatchStruct(i32); + +#[derive(FromSql, Debug)] +#[postgres(allow_mismatch, transparent)] +struct AllowMismatchFromSqlTransparentStruct(i32); + +fn main() {} diff --git a/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.stderr b/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.stderr new file mode 100644 index 000000000..a8e573248 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-allow-mismatch.stderr @@ -0,0 +1,43 @@ +error: #[postgres(allow_mismatch)] may only be applied to enums + --> src/compile-fail/invalid-allow-mismatch.rs:4:1 + | +4 | / #[postgres(allow_mismatch)] +5 | | struct ToSqlAllowMismatchStruct { +6 | | a: i32, +7 | | } + | |_^ + +error: #[postgres(allow_mismatch)] may only be applied to enums + --> 
src/compile-fail/invalid-allow-mismatch.rs:10:1 + | +10 | / #[postgres(allow_mismatch)] +11 | | struct FromSqlAllowMismatchStruct { +12 | | a: i32, +13 | | } + | |_^ + +error: #[postgres(allow_mismatch)] may only be applied to enums + --> src/compile-fail/invalid-allow-mismatch.rs:16:1 + | +16 | / #[postgres(allow_mismatch)] +17 | | struct ToSqlAllowMismatchTupleStruct(i32, i32); + | |_______________________________________________^ + +error: #[postgres(allow_mismatch)] may only be applied to enums + --> src/compile-fail/invalid-allow-mismatch.rs:20:1 + | +20 | / #[postgres(allow_mismatch)] +21 | | struct FromSqlAllowMismatchTupleStruct(i32, i32); + | |_________________________________________________^ + +error: #[postgres(transparent)] is not allowed with #[postgres(allow_mismatch)] + --> src/compile-fail/invalid-allow-mismatch.rs:24:25 + | +24 | #[postgres(transparent, allow_mismatch)] + | ^^^^^^^^^^^^^^ + +error: #[postgres(allow_mismatch)] is not allowed with #[postgres(transparent)] + --> src/compile-fail/invalid-allow-mismatch.rs:28:28 + | +28 | #[postgres(allow_mismatch, transparent)] + | ^^^^^^^^^^^ diff --git a/postgres-derive-test/src/compile-fail/invalid-transparent.rs b/postgres-derive-test/src/compile-fail/invalid-transparent.rs new file mode 100644 index 000000000..43bd48266 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-transparent.rs @@ -0,0 +1,35 @@ +use postgres_types::{FromSql, ToSql}; + +#[derive(ToSql, Debug)] +#[postgres(transparent)] +struct ToSqlTransparentStruct { + a: i32 +} + +#[derive(FromSql, Debug)] +#[postgres(transparent)] +struct FromSqlTransparentStruct { + a: i32 +} + +#[derive(ToSql, Debug)] +#[postgres(transparent)] +enum ToSqlTransparentEnum { + Foo +} + +#[derive(FromSql, Debug)] +#[postgres(transparent)] +enum FromSqlTransparentEnum { + Foo +} + +#[derive(ToSql, Debug)] +#[postgres(transparent)] +struct ToSqlTransparentTwoFieldTupleStruct(i32, i32); + +#[derive(FromSql, Debug)] +#[postgres(transparent)] 
+struct FromSqlTransparentTwoFieldTupleStruct(i32, i32); + +fn main() {} diff --git a/postgres-derive-test/src/compile-fail/invalid-transparent.stderr b/postgres-derive-test/src/compile-fail/invalid-transparent.stderr new file mode 100644 index 000000000..42e49f874 --- /dev/null +++ b/postgres-derive-test/src/compile-fail/invalid-transparent.stderr @@ -0,0 +1,49 @@ +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:4:1 + | +4 | / #[postgres(transparent)] +5 | | struct ToSqlTransparentStruct { +6 | | a: i32 +7 | | } + | |_^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:10:1 + | +10 | / #[postgres(transparent)] +11 | | struct FromSqlTransparentStruct { +12 | | a: i32 +13 | | } + | |_^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:16:1 + | +16 | / #[postgres(transparent)] +17 | | enum ToSqlTransparentEnum { +18 | | Foo +19 | | } + | |_^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:22:1 + | +22 | / #[postgres(transparent)] +23 | | enum FromSqlTransparentEnum { +24 | | Foo +25 | | } + | |_^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:28:1 + | +28 | / #[postgres(transparent)] +29 | | struct ToSqlTransparentTwoFieldTupleStruct(i32, i32); + | |_____________________________________________________^ + +error: #[postgres(transparent)] may only be applied to single field tuple structs + --> src/compile-fail/invalid-transparent.rs:32:1 + | +32 | / #[postgres(transparent)] +33 | | struct FromSqlTransparentTwoFieldTupleStruct(i32, i32); + | |_______________________________________________________^ diff --git a/postgres-derive-test/src/composites.rs 
b/postgres-derive-test/src/composites.rs index 5efd3944c..50a22790d 100644 --- a/postgres-derive-test/src/composites.rs +++ b/postgres-derive-test/src/composites.rs @@ -1,4 +1,4 @@ -use crate::test_type; +use crate::{test_type, test_type_asymmetric}; use postgres::{Client, NoTls}; use postgres_types::{FromSql, ToSql, WrongType}; use std::error::Error; @@ -89,6 +89,49 @@ fn name_overrides() { ); } +#[test] +fn rename_all_overrides() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "inventory_item", rename_all = "SCREAMING_SNAKE_CASE")] + struct InventoryItem { + name: String, + supplier_id: i32, + #[postgres(name = "Price")] + price: Option, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + \"NAME\" TEXT, + \"SUPPLIER_ID\" INT, + \"Price\" DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: Some(15.50), + }; + + let item_null = InventoryItem { + name: "foobar".to_owned(), + supplier_id: 100, + price: None, + }; + + test_type( + &mut conn, + "inventory_item", + &[ + (item, "ROW('foobar', 100, 15.50)"), + (item_null, "ROW('foobar', 100, NULL)"), + ], + ); +} + #[test] fn wrong_name() { #[derive(FromSql, ToSql, Debug, PartialEq)] @@ -215,3 +258,91 @@ fn wrong_type() { .unwrap_err(); assert!(err.source().unwrap().is::()); } + +#[test] +fn raw_ident_field() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(name = "inventory_item")] + struct InventoryItem { + r#type: String, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.inventory_item AS ( + type TEXT + )", + ) + .unwrap(); + + let item = InventoryItem { + r#type: "foo".to_owned(), + }; + + test_type(&mut conn, "inventory_item", &[(item, "ROW('foo')")]); +} + +#[test] +fn generics() { + #[derive(FromSql, 
Debug, PartialEq)] + struct InventoryItem + where + U: Clone, + { + name: String, + supplier_id: T, + price: Option, + } + + // doesn't make sense to implement derived FromSql on a type with borrows + #[derive(ToSql, Debug, PartialEq)] + #[postgres(name = "InventoryItem")] + struct InventoryItemRef<'a, T: 'a + Clone, U> + where + U: 'a + Clone, + { + name: &'a str, + supplier_id: &'a T, + price: Option<&'a U>, + } + + const NAME: &str = "foobar"; + const SUPPLIER_ID: i32 = 100; + const PRICE: f64 = 15.50; + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.batch_execute( + "CREATE TYPE pg_temp.\"InventoryItem\" AS ( + name TEXT, + supplier_id INT, + price DOUBLE PRECISION + );", + ) + .unwrap(); + + let item = InventoryItemRef { + name: NAME, + supplier_id: &SUPPLIER_ID, + price: Some(&PRICE), + }; + + let item_null = InventoryItemRef { + name: NAME, + supplier_id: &SUPPLIER_ID, + price: None, + }; + + test_type_asymmetric( + &mut conn, + "\"InventoryItem\"", + &[ + (item, "ROW('foobar', 100, 15.50)"), + (item_null, "ROW('foobar', 100, NULL)"), + ], + |t: &InventoryItemRef, f: &InventoryItem| { + t.name == f.name.as_str() + && t.supplier_id == &f.supplier_id + && t.price == f.price.as_ref() + }, + ); +} diff --git a/postgres-derive-test/src/enums.rs b/postgres-derive-test/src/enums.rs index a7039ca05..f3e6c488c 100644 --- a/postgres-derive-test/src/enums.rs +++ b/postgres-derive-test/src/enums.rs @@ -1,5 +1,5 @@ use crate::test_type; -use postgres::{Client, NoTls}; +use postgres::{error::DbError, Client, NoTls}; use postgres_types::{FromSql, ToSql, WrongType}; use std::error::Error; @@ -53,6 +53,35 @@ fn name_overrides() { ); } +#[test] +fn rename_all_overrides() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(name = "mood", rename_all = "snake_case")] + enum Mood { + VerySad, + #[postgres(name = "okay")] + Ok, + VeryHappy, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", 
NoTls).unwrap(); + conn.execute( + "CREATE TYPE pg_temp.mood AS ENUM ('very_sad', 'okay', 'very_happy')", + &[], + ) + .unwrap(); + + test_type( + &mut conn, + "mood", + &[ + (Mood::VerySad, "'very_sad'"), + (Mood::Ok, "'okay'"), + (Mood::VeryHappy, "'very_happy'"), + ], + ); +} + #[test] fn wrong_name() { #[derive(Debug, ToSql, FromSql, PartialEq)] @@ -102,3 +131,73 @@ fn missing_variant() { let err = conn.execute("SELECT $1::foo", &[&Foo::Bar]).unwrap_err(); assert!(err.source().unwrap().is::()); } + +#[test] +fn allow_mismatch_enums() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(allow_mismatch)] + enum Foo { + Bar, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.\"Foo\" AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let row = conn.query_one("SELECT $1::\"Foo\"", &[&Foo::Bar]).unwrap(); + assert_eq!(row.get::<_, Foo>(0), Foo::Bar); +} + +#[test] +fn missing_enum_variant() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(allow_mismatch)] + enum Foo { + Bar, + Buz, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.\"Foo\" AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let err = conn + .query_one("SELECT $1::\"Foo\"", &[&Foo::Buz]) + .unwrap_err(); + assert!(err.source().unwrap().is::()); +} + +#[test] +fn allow_mismatch_and_renaming() { + #[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(name = "foo", allow_mismatch)] + enum Foo { + #[postgres(name = "bar")] + Bar, + #[postgres(name = "buz")] + Buz, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.foo AS ENUM ('bar', 'baz', 'buz')", &[]) + .unwrap(); + + let row = conn.query_one("SELECT $1::foo", &[&Foo::Buz]).unwrap(); + assert_eq!(row.get::<_, Foo>(0), Foo::Buz); +} + +#[test] +fn wrong_name_and_allow_mismatch() { + 
#[derive(Debug, ToSql, FromSql, PartialEq)] + #[postgres(allow_mismatch)] + enum Foo { + Bar, + } + + let mut conn = Client::connect("user=postgres host=localhost port=5433", NoTls).unwrap(); + conn.execute("CREATE TYPE pg_temp.foo AS ENUM ('Bar', 'Baz')", &[]) + .unwrap(); + + let err = conn.query_one("SELECT $1::foo", &[&Foo::Bar]).unwrap_err(); + assert!(err.source().unwrap().is::()); +} diff --git a/postgres-derive-test/src/lib.rs b/postgres-derive-test/src/lib.rs index 7da75af8f..f0534f32c 100644 --- a/postgres-derive-test/src/lib.rs +++ b/postgres-derive-test/src/lib.rs @@ -7,25 +7,50 @@ use std::fmt; mod composites; mod domains; mod enums; +mod transparent; pub fn test_type(conn: &mut Client, sql_type: &str, checks: &[(T, S)]) where T: PartialEq + FromSqlOwned + ToSql + Sync, S: fmt::Display, { - for &(ref val, ref repr) in checks.iter() { + for (val, repr) in checks.iter() { let stmt = conn - .prepare(&*format!("SELECT {}::{}", *repr, sql_type)) + .prepare(&format!("SELECT {}::{}", *repr, sql_type)) .unwrap(); let result = conn.query_one(&stmt, &[]).unwrap().get(0); assert_eq!(val, &result); - let stmt = conn.prepare(&*format!("SELECT $1::{}", sql_type)).unwrap(); + let stmt = conn.prepare(&format!("SELECT $1::{}", sql_type)).unwrap(); let result = conn.query_one(&stmt, &[val]).unwrap().get(0); assert_eq!(val, &result); } } +pub fn test_type_asymmetric( + conn: &mut Client, + sql_type: &str, + checks: &[(T, S)], + cmp: C, +) where + T: ToSql + Sync, + F: FromSqlOwned, + S: fmt::Display, + C: Fn(&T, &F) -> bool, +{ + for (val, repr) in checks.iter() { + let stmt = conn + .prepare(&format!("SELECT {}::{}", *repr, sql_type)) + .unwrap(); + let result: F = conn.query_one(&stmt, &[]).unwrap().get(0); + assert!(cmp(val, &result)); + + let stmt = conn.prepare(&format!("SELECT $1::{}", sql_type)).unwrap(); + let result: F = conn.query_one(&stmt, &[val]).unwrap().get(0); + assert!(cmp(val, &result)); + } +} + #[test] fn compile_fail() { 
trybuild::TestCases::new().compile_fail("src/compile-fail/*.rs"); diff --git a/postgres-derive-test/src/transparent.rs b/postgres-derive-test/src/transparent.rs new file mode 100644 index 000000000..1614553d2 --- /dev/null +++ b/postgres-derive-test/src/transparent.rs @@ -0,0 +1,18 @@ +use postgres::{Client, NoTls}; +use postgres_types::{FromSql, ToSql}; + +#[test] +fn round_trip() { + #[derive(FromSql, ToSql, Debug, PartialEq)] + #[postgres(transparent)] + struct UserId(i32); + + assert_eq!( + Client::connect("user=postgres host=localhost port=5433", NoTls) + .unwrap() + .query_one("SELECT $1::integer", &[&UserId(123)]) + .unwrap() + .get::<_, UserId>(0), + UserId(123) + ); +} diff --git a/postgres-derive/CHANGELOG.md b/postgres-derive/CHANGELOG.md index 354f6f277..1532b307c 100644 --- a/postgres-derive/CHANGELOG.md +++ b/postgres-derive/CHANGELOG.md @@ -1,5 +1,42 @@ # Change Log +## v0.4.6 - 2024-09-15 + +### Changed + +* Upgraded `heck`. + +## v0.4.5 - 2023-08-19 + +### Added + +* Added a `rename_all` option for enum and struct derives. +* Added an `allow_mismatch` option to disable strict enum variant checks against the Postgres type. + +## v0.4.4 - 2023-03-27 + +### Changed + +* Upgraded `syn`. + +## v0.4.3 - 2022-09-07 + +### Added + +* Added support for parameterized structs. + +## v0.4.2 - 2022-04-30 + +### Added + +* Added support for transparent wrapper types. + +## v0.4.1 - 2021-11-23 + +### Fixed + +* Fixed handling of struct fields using raw identifiers. 
+ ## v0.4.0 - 2019-12-23 No changes diff --git a/postgres-derive/Cargo.toml b/postgres-derive/Cargo.toml index 293c294a0..96600f124 100644 --- a/postgres-derive/Cargo.toml +++ b/postgres-derive/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "postgres-derive" -version = "0.4.0" +version = "0.4.6" authors = ["Steven Fackler "] -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" edition = "2018" description = "An internal crate used by postgres-types" repository = "https://github.com/sfackler/rust-postgres" @@ -12,6 +12,7 @@ proc-macro = true test = false [dependencies] -syn = "1.0" +syn = "2.0" proc-macro2 = "1.0" quote = "1.0" +heck = "0.5" diff --git a/postgres-derive/LICENSE-APACHE b/postgres-derive/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/postgres-derive/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/postgres-derive/LICENSE-APACHE b/postgres-derive/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/postgres-derive/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-derive/LICENSE-MIT b/postgres-derive/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/postgres-derive/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/postgres-derive/LICENSE-MIT b/postgres-derive/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/postgres-derive/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-derive/src/accepts.rs b/postgres-derive/src/accepts.rs index 530badd0b..a68538dcc 100644 --- a/postgres-derive/src/accepts.rs +++ b/postgres-derive/src/accepts.rs @@ -6,6 +6,14 @@ use syn::Ident; use crate::composites::Field; use crate::enums::Variant; +pub fn transparent_body(field: &syn::Field) -> TokenStream { + let ty = &field.ty; + + quote! { + <#ty as ::postgres_types::ToSql>::accepts(type_) + } +} + pub fn domain_body(name: &str, field: &syn::Field) -> TokenStream { let ty = &field.ty; @@ -23,31 +31,37 @@ pub fn domain_body(name: &str, field: &syn::Field) -> TokenStream { } } -pub fn enum_body(name: &str, variants: &[Variant]) -> TokenStream { +pub fn enum_body(name: &str, variants: &[Variant], allow_mismatch: bool) -> TokenStream { let num_variants = variants.len(); let variant_names = variants.iter().map(|v| &v.name); - quote! { - if type_.name() != #name { - return false; + if allow_mismatch { + quote! { + type_.name() == #name } + } else { + quote! 
{ + if type_.name() != #name { + return false; + } - match *type_.kind() { - ::postgres_types::Kind::Enum(ref variants) => { - if variants.len() != #num_variants { - return false; - } - - variants.iter().all(|v| { - match &**v { - #( - #variant_names => true, - )* - _ => false, + match *type_.kind() { + ::postgres_types::Kind::Enum(ref variants) => { + if variants.len() != #num_variants { + return false; } - }) + + variants.iter().all(|v| { + match &**v { + #( + #variant_names => true, + )* + _ => false, + } + }) + } + _ => false, } - _ => false, } } } diff --git a/postgres-derive/src/case.rs b/postgres-derive/src/case.rs new file mode 100644 index 000000000..20ecc8eed --- /dev/null +++ b/postgres-derive/src/case.rs @@ -0,0 +1,110 @@ +#[allow(deprecated, unused_imports)] +use std::ascii::AsciiExt; + +use heck::{ + ToKebabCase, ToLowerCamelCase, ToShoutyKebabCase, ToShoutySnakeCase, ToSnakeCase, ToTrainCase, + ToUpperCamelCase, +}; + +use self::RenameRule::*; + +/// The different possible ways to change case of fields in a struct, or variants in an enum. +#[allow(clippy::enum_variant_names)] +#[derive(Copy, Clone, PartialEq)] +pub enum RenameRule { + /// Rename direct children to "lowercase" style. + LowerCase, + /// Rename direct children to "UPPERCASE" style. + UpperCase, + /// Rename direct children to "PascalCase" style, as typically used for + /// enum variants. + PascalCase, + /// Rename direct children to "camelCase" style. + CamelCase, + /// Rename direct children to "snake_case" style, as commonly used for + /// fields. + SnakeCase, + /// Rename direct children to "SCREAMING_SNAKE_CASE" style, as commonly + /// used for constants. + ScreamingSnakeCase, + /// Rename direct children to "kebab-case" style. + KebabCase, + /// Rename direct children to "SCREAMING-KEBAB-CASE" style. + ScreamingKebabCase, + + /// Rename direct children to "Train-Case" style. 
+ TrainCase, +} + +pub const RENAME_RULES: &[&str] = &[ + "lowercase", + "UPPERCASE", + "PascalCase", + "camelCase", + "snake_case", + "SCREAMING_SNAKE_CASE", + "kebab-case", + "SCREAMING-KEBAB-CASE", + "Train-Case", +]; + +impl RenameRule { + pub fn from_str(rule: &str) -> Option { + match rule { + "lowercase" => Some(LowerCase), + "UPPERCASE" => Some(UpperCase), + "PascalCase" => Some(PascalCase), + "camelCase" => Some(CamelCase), + "snake_case" => Some(SnakeCase), + "SCREAMING_SNAKE_CASE" => Some(ScreamingSnakeCase), + "kebab-case" => Some(KebabCase), + "SCREAMING-KEBAB-CASE" => Some(ScreamingKebabCase), + "Train-Case" => Some(TrainCase), + _ => None, + } + } + /// Apply a renaming rule to an enum or struct field, returning the version expected in the source. + pub fn apply_to_field(&self, variant: &str) -> String { + match *self { + LowerCase => variant.to_lowercase(), + UpperCase => variant.to_uppercase(), + PascalCase => variant.to_upper_camel_case(), + CamelCase => variant.to_lower_camel_case(), + SnakeCase => variant.to_snake_case(), + ScreamingSnakeCase => variant.to_shouty_snake_case(), + KebabCase => variant.to_kebab_case(), + ScreamingKebabCase => variant.to_shouty_kebab_case(), + TrainCase => variant.to_train_case(), + } + } +} + +#[test] +fn rename_field() { + for &(original, lower, upper, camel, snake, screaming, kebab, screaming_kebab) in &[ + ( + "Outcome", "outcome", "OUTCOME", "outcome", "outcome", "OUTCOME", "outcome", "OUTCOME", + ), + ( + "VeryTasty", + "verytasty", + "VERYTASTY", + "veryTasty", + "very_tasty", + "VERY_TASTY", + "very-tasty", + "VERY-TASTY", + ), + ("A", "a", "A", "a", "a", "A", "a", "A"), + ("Z42", "z42", "Z42", "z42", "z42", "Z42", "z42", "Z42"), + ] { + assert_eq!(LowerCase.apply_to_field(original), lower); + assert_eq!(UpperCase.apply_to_field(original), upper); + assert_eq!(PascalCase.apply_to_field(original), original); + assert_eq!(CamelCase.apply_to_field(original), camel); + 
assert_eq!(SnakeCase.apply_to_field(original), snake); + assert_eq!(ScreamingSnakeCase.apply_to_field(original), screaming); + assert_eq!(KebabCase.apply_to_field(original), kebab); + assert_eq!(ScreamingKebabCase.apply_to_field(original), screaming_kebab); + } +} diff --git a/postgres-derive/src/composites.rs b/postgres-derive/src/composites.rs index f5599d375..b6aad8ab3 100644 --- a/postgres-derive/src/composites.rs +++ b/postgres-derive/src/composites.rs @@ -1,6 +1,10 @@ -use syn::{Error, Ident, Type}; +use proc_macro2::Span; +use syn::{ + punctuated::Punctuated, Error, GenericParam, Generics, Ident, Path, PathSegment, Type, + TypeParamBound, +}; -use crate::overrides::Overrides; +use crate::{case::RenameRule, overrides::Overrides}; pub struct Field { pub name: String, @@ -9,14 +13,48 @@ pub struct Field { } impl Field { - pub fn parse(raw: &syn::Field) -> Result { - let overrides = Overrides::extract(&raw.attrs)?; - + pub fn parse(raw: &syn::Field, rename_all: Option) -> Result { + let overrides = Overrides::extract(&raw.attrs, false)?; + let ident = raw.ident.as_ref().unwrap().clone(); + + // field level name override takes precedence over container level rename_all override + let name = match overrides.name { + Some(n) => n, + None => { + let name = ident.to_string(); + let stripped = name.strip_prefix("r#").map(String::from).unwrap_or(name); + + match rename_all { + Some(rule) => rule.apply_to_field(&stripped), + None => stripped, + } + } + }; + Ok(Field { - name: overrides.name.unwrap_or_else(|| ident.to_string()), + name, ident, type_: raw.ty.clone(), }) } } + +pub(crate) fn append_generic_bound(mut generics: Generics, bound: &TypeParamBound) -> Generics { + for param in &mut generics.params { + if let GenericParam::Type(param) = param { + param.bounds.push(bound.to_owned()) + } + } + generics +} + +pub(crate) fn new_derive_path(last: PathSegment) -> Path { + let mut path = Path { + leading_colon: None, + segments: Punctuated::new(), + }; + path.segments
.push(Ident::new("postgres_types", Span::call_site()).into()); + path.segments.push(last); + path +} diff --git a/postgres-derive/src/enums.rs b/postgres-derive/src/enums.rs index 3c6bc7113..9a6dfa926 100644 --- a/postgres-derive/src/enums.rs +++ b/postgres-derive/src/enums.rs @@ -1,6 +1,6 @@ use syn::{Error, Fields, Ident}; -use crate::overrides::Overrides; +use crate::{case::RenameRule, overrides::Overrides}; pub struct Variant { pub ident: Ident, @@ -8,7 +8,7 @@ pub struct Variant { } impl Variant { - pub fn parse(raw: &syn::Variant) -> Result { + pub fn parse(raw: &syn::Variant, rename_all: Option) -> Result { match raw.fields { Fields::Unit => {} _ => { @@ -18,11 +18,16 @@ impl Variant { )) } } + let overrides = Overrides::extract(&raw.attrs, false)?; - let overrides = Overrides::extract(&raw.attrs)?; + // variant level name override takes precedence over container level rename_all override + let name = overrides.name.unwrap_or_else(|| match rename_all { + Some(rule) => rule.apply_to_field(&raw.ident.to_string()), + None => raw.ident.to_string(), + }); Ok(Variant { ident: raw.ident.clone(), - name: overrides.name.unwrap_or_else(|| raw.ident.to_string()), + name, }) } } diff --git a/postgres-derive/src/fromsql.rs b/postgres-derive/src/fromsql.rs index e1ab6ffa7..d3ac47f4f 100644 --- a/postgres-derive/src/fromsql.rs +++ b/postgres-derive/src/fromsql.rs @@ -1,27 +1,83 @@ use proc_macro2::{Span, TokenStream}; -use quote::quote; +use quote::{format_ident, quote}; use std::iter; -use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; +use syn::{ + punctuated::Punctuated, token, AngleBracketedGenericArguments, Data, DataStruct, DeriveInput, + Error, Fields, GenericArgument, GenericParam, Generics, Ident, Lifetime, PathArguments, + PathSegment, +}; +use syn::{LifetimeParam, TraitBound, TraitBoundModifier, TypeParamBound}; use crate::accepts; use crate::composites::Field; use crate::composites::{append_generic_bound, new_derive_path}; use 
crate::enums::Variant; use crate::overrides::Overrides; pub fn expand_derive_fromsql(input: DeriveInput) -> Result { - let overrides = Overrides::extract(&input.attrs)?; + let overrides = Overrides::extract(&input.attrs, true)?; - let name = overrides.name.unwrap_or_else(|| input.ident.to_string()); + if (overrides.name.is_some() || overrides.rename_all.is_some()) && overrides.transparent { + return Err(Error::new_spanned( + &input, + "#[postgres(transparent)] is not allowed with #[postgres(name = \"...\")] or #[postgres(rename_all = \"...\")]", + )); + } + + let name = overrides + .name + .clone() + .unwrap_or_else(|| input.ident.to_string()); - let (accepts_body, to_sql_body) = match input.data { + let (accepts_body, to_sql_body) = if overrides.transparent { + match input.data { + Data::Struct(DataStruct { + fields: Fields::Unnamed(ref fields), + .. + }) if fields.unnamed.len() == 1 => { + let field = fields.unnamed.first().unwrap(); + ( + accepts::transparent_body(field), + transparent_body(&input.ident, field), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[postgres(transparent)] may only be applied to single field tuple structs", + )) + } + } + } else if overrides.allow_mismatch { + match input.data { + Data::Enum(ref data) => { + let variants = data + .variants + .iter() + .map(|variant| Variant::parse(variant, overrides.rename_all)) + .collect::, _>>()?; + ( + accepts::enum_body(&name, &variants, overrides.allow_mismatch), + enum_body(&input.ident, &variants), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[postgres(allow_mismatch)] may only be applied to enums", + )); + } + } + } else { + match input.data { Data::Enum(ref data) => { let variants = data .variants .iter() - .map(Variant::parse) + .map(|variant| Variant::parse(variant, overrides.rename_all)) .collect::, _>>()?; ( - accepts::enum_body(&name, &variants), + accepts::enum_body(&name, &variants, overrides.allow_mismatch), enum_body(&input.ident, &variants), ) } @@ 
-42,7 +98,7 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { let fields = fields .named .iter() - .map(Field::parse) + .map(|field| Field::parse(field, overrides.rename_all)) .collect::, _>>()?; ( accepts::composite_body(&name, "FromSql", &fields), @@ -55,13 +111,17 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { "#[derive(FromSql)] may only be applied to structs, single field tuple structs, and enums", )) } + } }; let ident = &input.ident; + let (generics, lifetime) = build_generics(&input.generics); + let (impl_generics, _, _) = generics.split_for_impl(); + let (_, ty_generics, where_clause) = input.generics.split_for_impl(); let out = quote! { - impl<'a> postgres_types::FromSql<'a> for #ident { - fn from_sql(_type: &postgres_types::Type, buf: &'a [u8]) - -> std::result::Result<#ident, + impl #impl_generics postgres_types::FromSql<#lifetime> for #ident #ty_generics #where_clause { + fn from_sql(_type: &postgres_types::Type, buf: &#lifetime [u8]) + -> std::result::Result<#ident #ty_generics, std::boxed::Box> { @@ -77,6 +137,13 @@ pub fn expand_derive_fromsql(input: DeriveInput) -> Result { Ok(out) } +fn transparent_body(ident: &Ident, field: &syn::Field) -> TokenStream { + let ty = &field.ty; + quote! 
{ + <#ty as postgres_types::FromSql>::from_sql(_type, buf).map(#ident) + } +} + fn enum_body(ident: &Ident, variants: &[Variant]) -> TokenStream { let variant_names = variants.iter().map(|v| &v.name); let idents = iter::repeat(ident); @@ -119,7 +186,7 @@ fn domain_body(ident: &Ident, field: &syn::Field) -> TokenStream { fn composite_body(ident: &Ident, fields: &[Field]) -> TokenStream { let temp_vars = &fields .iter() - .map(|f| Ident::new(&format!("__{}", f.ident), Span::call_site())) + .map(|f| format_ident!("__{}", f.ident)) .collect::>(); let field_names = &fields.iter().map(|f| &f.name).collect::>(); let field_idents = &fields.iter().map(|f| &f.ident).collect::>(); @@ -165,3 +232,35 @@ fn composite_body(ident: &Ident, fields: &[Field]) -> TokenStream { }) } } + +fn build_generics(source: &Generics) -> (Generics, Lifetime) { + // don't worry about lifetime name collisions, it doesn't make sense to derive FromSql on a struct with a lifetime + let lifetime = Lifetime::new("'a", Span::call_site()); + + let mut out = append_generic_bound(source.to_owned(), &new_fromsql_bound(&lifetime)); + out.params.insert( + 0, + GenericParam::Lifetime(LifetimeParam::new(lifetime.to_owned())), + ); + + (out, lifetime) +} + +fn new_fromsql_bound(lifetime: &Lifetime) -> TypeParamBound { + let mut path_segment: PathSegment = Ident::new("FromSql", Span::call_site()).into(); + let mut seg_args = Punctuated::new(); + seg_args.push(GenericArgument::Lifetime(lifetime.to_owned())); + path_segment.arguments = PathArguments::AngleBracketed(AngleBracketedGenericArguments { + colon2_token: None, + lt_token: token::Lt::default(), + args: seg_args, + gt_token: token::Gt::default(), + }); + + TypeParamBound::Trait(TraitBound { + lifetimes: None, + modifier: TraitBoundModifier::None, + paren_token: None, + path: new_derive_path(path_segment), + }) +} diff --git a/postgres-derive/src/lib.rs b/postgres-derive/src/lib.rs index fd17b9de6..b849096c9 100644 --- a/postgres-derive/src/lib.rs +++ 
b/postgres-derive/src/lib.rs @@ -4,8 +4,10 @@ extern crate proc_macro; use proc_macro::TokenStream; +use syn::parse_macro_input; mod accepts; +mod case; mod composites; mod enums; mod fromsql; @@ -14,7 +16,8 @@ mod tosql; #[proc_macro_derive(ToSql, attributes(postgres))] pub fn derive_tosql(input: TokenStream) -> TokenStream { - let input = syn::parse(input).unwrap(); + let input = parse_macro_input!(input); + tosql::expand_derive_tosql(input) .unwrap_or_else(|e| e.to_compile_error()) .into() @@ -22,7 +25,8 @@ pub fn derive_tosql(input: TokenStream) -> TokenStream { #[proc_macro_derive(FromSql, attributes(postgres))] pub fn derive_fromsql(input: TokenStream) -> TokenStream { - let input = syn::parse(input).unwrap(); + let input = parse_macro_input!(input); + fromsql::expand_derive_fromsql(input) .unwrap_or_else(|e| e.to_compile_error()) .into() diff --git a/postgres-derive/src/overrides.rs b/postgres-derive/src/overrides.rs index 08e6f3a77..d50550bee 100644 --- a/postgres-derive/src/overrides.rs +++ b/postgres-derive/src/overrides.rs @@ -1,45 +1,102 @@ -use syn::{Attribute, Error, Lit, Meta, NestedMeta}; +use syn::punctuated::Punctuated; +use syn::{Attribute, Error, Expr, ExprLit, Lit, Meta, Token}; + +use crate::case::{RenameRule, RENAME_RULES}; pub struct Overrides { pub name: Option, + pub rename_all: Option, + pub transparent: bool, + pub allow_mismatch: bool, } impl Overrides { - pub fn extract(attrs: &[Attribute]) -> Result { - let mut overrides = Overrides { name: None }; + pub fn extract(attrs: &[Attribute], container_attr: bool) -> Result { + let mut overrides = Overrides { + name: None, + rename_all: None, + transparent: false, + allow_mismatch: false, + }; for attr in attrs { - let attr = match attr.parse_meta() { - Ok(meta) => meta, - Err(_) => continue, - }; - if !attr.path().is_ident("postgres") { continue; } - let list = match attr { + let list = match &attr.meta { Meta::List(ref list) => list, bad => return Err(Error::new_spanned(bad, "expected a 
#[postgres(...)]")), }; - for item in &list.nested { + let nested = list.parse_args_with(Punctuated::::parse_terminated)?; + + for item in nested { match item { - NestedMeta::Meta(Meta::NameValue(meta)) => { - if !meta.path.is_ident("name") { + Meta::NameValue(meta) => { + let name_override = meta.path.is_ident("name"); + let rename_all_override = meta.path.is_ident("rename_all"); + if !container_attr && rename_all_override { + return Err(Error::new_spanned( + &meta.path, + "rename_all is a container attribute", + )); + } + if !name_override && !rename_all_override { return Err(Error::new_spanned(&meta.path, "unknown override")); } - let value = match &meta.lit { - Lit::Str(s) => s.value(), + let value = match &meta.value { + Expr::Lit(ExprLit { + lit: Lit::Str(lit), .. + }) => lit.value(), bad => { return Err(Error::new_spanned(bad, "expected a string literal")) } }; - overrides.name = Some(value); + if name_override { + overrides.name = Some(value); + } else if rename_all_override { + let rename_rule = RenameRule::from_str(&value).ok_or_else(|| { + Error::new_spanned( + &meta.value, + format!( + "invalid rename_all rule, expected one of: {}", + RENAME_RULES + .iter() + .map(|rule| format!("\"{}\"", rule)) + .collect::>() + .join(", ") + ), + ) + })?; + + overrides.rename_all = Some(rename_rule); + } + } + Meta::Path(path) => { + if path.is_ident("transparent") { + if overrides.allow_mismatch { + return Err(Error::new_spanned( + path, + "#[postgres(allow_mismatch)] is not allowed with #[postgres(transparent)]", + )); + } + overrides.transparent = true; + } else if path.is_ident("allow_mismatch") { + if overrides.transparent { + return Err(Error::new_spanned( + path, + "#[postgres(transparent)] is not allowed with #[postgres(allow_mismatch)]", + )); + } + overrides.allow_mismatch = true; + } else { + return Err(Error::new_spanned(path, "unknown override")); + } } - bad => return Err(Error::new_spanned(bad, "expected a name-value meta item")), + bad => return 
Err(Error::new_spanned(bad, "unknown attribute")), } } } diff --git a/postgres-derive/src/tosql.rs b/postgres-derive/src/tosql.rs index a1c87b0ff..81d4834bf 100644 --- a/postgres-derive/src/tosql.rs +++ b/postgres-derive/src/tosql.rs @@ -1,62 +1,118 @@ -use proc_macro2::TokenStream; +use proc_macro2::{Span, TokenStream}; use quote::quote; use std::iter; -use syn::{Data, DataStruct, DeriveInput, Error, Fields, Ident}; +use syn::{ + Data, DataStruct, DeriveInput, Error, Fields, Ident, TraitBound, TraitBoundModifier, + TypeParamBound, +}; use crate::accepts; use crate::composites::Field; +use crate::composites::{append_generic_bound, new_derive_path}; use crate::enums::Variant; use crate::overrides::Overrides; pub fn expand_derive_tosql(input: DeriveInput) -> Result { - let overrides = Overrides::extract(&input.attrs)?; - - let name = overrides.name.unwrap_or_else(|| input.ident.to_string()); - - let (accepts_body, to_sql_body) = match input.data { - Data::Enum(ref data) => { - let variants = data - .variants - .iter() - .map(Variant::parse) - .collect::, _>>()?; - ( - accepts::enum_body(&name, &variants), - enum_body(&input.ident, &variants), - ) - } - Data::Struct(DataStruct { - fields: Fields::Unnamed(ref fields), - .. - }) if fields.unnamed.len() == 1 => { - let field = fields.unnamed.first().unwrap(); - (accepts::domain_body(&name, &field), domain_body()) + let overrides = Overrides::extract(&input.attrs, true)?; + + if (overrides.name.is_some() || overrides.rename_all.is_some()) && overrides.transparent { + return Err(Error::new_spanned( + &input, + "#[postgres(transparent)] is not allowed with #[postgres(name = \"...\")] or #[postgres(rename_all = \"...\")]", + )); + } + + let name = overrides + .name + .clone() + .unwrap_or_else(|| input.ident.to_string()); + + let (accepts_body, to_sql_body) = if overrides.transparent { + match input.data { + Data::Struct(DataStruct { + fields: Fields::Unnamed(ref fields), + .. 
+ }) if fields.unnamed.len() == 1 => { + let field = fields.unnamed.first().unwrap(); + + (accepts::transparent_body(field), transparent_body()) + } + _ => { + return Err(Error::new_spanned( + input, + "#[postgres(transparent)] may only be applied to single field tuple structs", + )); + } } - Data::Struct(DataStruct { - fields: Fields::Named(ref fields), - .. - }) => { - let fields = fields - .named - .iter() - .map(Field::parse) - .collect::, _>>()?; - ( - accepts::composite_body(&name, "ToSql", &fields), - composite_body(&fields), - ) + } else if overrides.allow_mismatch { + match input.data { + Data::Enum(ref data) => { + let variants = data + .variants + .iter() + .map(|variant| Variant::parse(variant, overrides.rename_all)) + .collect::, _>>()?; + ( + accepts::enum_body(&name, &variants, overrides.allow_mismatch), + enum_body(&input.ident, &variants), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[postgres(allow_mismatch)] may only be applied to enums", + )); + } } - _ => { - return Err(Error::new_spanned( - input, - "#[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums", - )); + } else { + match input.data { + Data::Enum(ref data) => { + let variants = data + .variants + .iter() + .map(|variant| Variant::parse(variant, overrides.rename_all)) + .collect::, _>>()?; + ( + accepts::enum_body(&name, &variants, overrides.allow_mismatch), + enum_body(&input.ident, &variants), + ) + } + Data::Struct(DataStruct { + fields: Fields::Unnamed(ref fields), + .. + }) if fields.unnamed.len() == 1 => { + let field = fields.unnamed.first().unwrap(); + + (accepts::domain_body(&name, field), domain_body()) + } + Data::Struct(DataStruct { + fields: Fields::Named(ref fields), + .. 
+ }) => { + let fields = fields + .named + .iter() + .map(|field| Field::parse(field, overrides.rename_all)) + .collect::, _>>()?; + ( + accepts::composite_body(&name, "ToSql", &fields), + composite_body(&fields), + ) + } + _ => { + return Err(Error::new_spanned( + input, + "#[derive(ToSql)] may only be applied to structs, single field tuple structs, and enums", + )); + } } }; let ident = &input.ident; + let generics = append_generic_bound(input.generics.to_owned(), &new_tosql_bound()); + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let out = quote! { - impl postgres_types::ToSql for #ident { + impl#impl_generics postgres_types::ToSql for #ident#ty_generics #where_clause { fn to_sql(&self, _type: &postgres_types::Type, buf: &mut postgres_types::private::BytesMut) @@ -78,6 +134,12 @@ pub fn expand_derive_tosql(input: DeriveInput) -> Result { Ok(out) } +fn transparent_body() -> TokenStream { + quote! { + postgres_types::ToSql::to_sql(&self.0, _type, buf) + } +} + fn enum_body(ident: &Ident, variants: &[Variant]) -> TokenStream { let idents = iter::repeat(ident); let variant_idents = variants.iter().map(|v| &v.ident); @@ -148,3 +210,12 @@ fn composite_body(fields: &[Field]) -> TokenStream { std::result::Result::Ok(postgres_types::IsNull::No) } } + +fn new_tosql_bound() -> TypeParamBound { + TypeParamBound::Trait(TraitBound { + lifetimes: None, + modifier: TraitBoundModifier::None, + paren_token: None, + path: new_derive_path(Ident::new("ToSql", Span::call_site()).into()), + }) +} diff --git a/postgres-native-tls/CHANGELOG.md b/postgres-native-tls/CHANGELOG.md index fd9180b3d..5fe0a9c7a 100644 --- a/postgres-native-tls/CHANGELOG.md +++ b/postgres-native-tls/CHANGELOG.md @@ -1,5 +1,23 @@ # Change Log +## v0.5.1 - 2025-02-02 + +### Added + +* Added `set_postgresql_alpn`. + +## v0.5.0 - 2020-12-25 + +### Changed + +* Upgraded to `tokio-postgres` 0.7. + +## v0.4.0 - 2020-10-17 + +### Changed + +* Upgraded to `tokio-postgres` 0.6. 
+ ## v0.3.0 - 2019-12-23 ### Changed diff --git a/postgres-native-tls/Cargo.toml b/postgres-native-tls/Cargo.toml index e2d60d1fa..f79ae5491 100644 --- a/postgres-native-tls/Cargo.toml +++ b/postgres-native-tls/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "postgres-native-tls" -version = "0.3.0" +version = "0.5.1" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "TLS support for tokio-postgres via native-tls" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" @@ -16,13 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -bytes = "0.5" -futures = "0.3" -native-tls = "0.2" -tokio = "0.2" -tokio-tls = "0.3" -tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-features = false } +native-tls = { version = "0.2", features = ["alpn"] } +tokio = "1.0" +tokio-native-tls = "0.3" +tokio-postgres = { version = "0.7.11", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { version = "0.2", features = ["full"] } -postgres = { version = "0.17.0", path = "../postgres" } +futures-util = "0.3" +tokio = { version = "1.0", features = ["macros", "net", "rt"] } +postgres = { version = "0.19.8", path = "../postgres" } diff --git a/postgres-native-tls/LICENSE-APACHE b/postgres-native-tls/LICENSE-APACHE index b9e46b0fc..965b606f3 120000 --- a/postgres-native-tls/LICENSE-APACHE +++ b/postgres-native-tls/LICENSE-APACHE @@ -1 +1 @@ -../tokio-postgres/LICENSE-APACHE \ No newline at end of file +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-native-tls/LICENSE-MIT b/postgres-native-tls/LICENSE-MIT index 162832a42..76219eb72 120000 --- a/postgres-native-tls/LICENSE-MIT +++ b/postgres-native-tls/LICENSE-MIT @@ -1 +1 @@ -../tokio-postgres/LICENSE-MIT \ No newline at end of file +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-native-tls/src/lib.rs 
b/postgres-native-tls/src/lib.rs index 207ae6cb2..9ee7da653 100644 --- a/postgres-native-tls/src/lib.rs +++ b/postgres-native-tls/src/lib.rs @@ -1,13 +1,15 @@ -//! TLS support for `tokio-postgres` and `postgres` via `native-tls. +//! TLS support for `tokio-postgres` and `postgres` via `native-tls`. //! //! # Examples //! //! ```no_run //! use native_tls::{Certificate, TlsConnector}; +//! # #[cfg(feature = "runtime")] //! use postgres_native_tls::MakeTlsConnector; //! use std::fs; //! //! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "runtime")] { //! let cert = fs::read("database_cert.pem")?; //! let cert = Certificate::from_pem(&cert)?; //! let connector = TlsConnector::builder() @@ -19,6 +21,7 @@ //! "host=localhost user=postgres sslmode=require", //! connector, //! ); +//! # } //! //! // ... //! # Ok(()) @@ -27,10 +30,12 @@ //! //! ```no_run //! use native_tls::{Certificate, TlsConnector}; +//! # #[cfg(feature = "runtime")] //! use postgres_native_tls::MakeTlsConnector; //! use std::fs; //! //! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "runtime")] { //! let cert = fs::read("database_cert.pem")?; //! let cert = Certificate::from_pem(&cert)?; //! let connector = TlsConnector::builder() @@ -42,19 +47,18 @@ //! "host=localhost user=postgres sslmode=require", //! connector, //! )?; +//! # } //! # Ok(()) //! # } //! ``` -#![doc(html_root_url = "https://docs.rs/postgres-native-tls/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use bytes::{Buf, BufMut}; +use native_tls::TlsConnectorBuilder; use std::future::Future; use std::io; -use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, BufReader, ReadBuf}; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; @@ -94,7 +98,7 @@ where /// A `TlsConnect` implementation using the `native-tls` crate. 
pub struct TlsConnector { - connector: tokio_tls::TlsConnector, + connector: tokio_native_tls::TlsConnector, domain: String, } @@ -102,7 +106,7 @@ impl TlsConnector { /// Creates a new connector configured to connect to the specified domain. pub fn new(connector: native_tls::TlsConnector, domain: &str) -> TlsConnector { TlsConnector { - connector: tokio_tls::TlsConnector::from(connector), + connector: tokio_native_tls::TlsConnector::from(connector), domain: domain.to_string(), } } @@ -118,6 +122,7 @@ where type Future = Pin, native_tls::Error>> + Send>>; fn connect(self, stream: S) -> Self::Future { + let stream = BufReader::with_capacity(8192, stream); let future = async move { let stream = self.connector.connect(&self.domain, stream).await?; @@ -129,34 +134,19 @@ where } /// The stream returned by `TlsConnector`. -pub struct TlsStream(tokio_tls::TlsStream); +pub struct TlsStream(tokio_native_tls::TlsStream>); impl AsyncRead for TlsStream where S: AsyncRead + AsyncWrite + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.0.prepare_uninitialized_buffer(buf) - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { Pin::new(&mut self.0).poll_read(cx, buf) } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - Pin::new(&mut self.0).poll_read_buf(cx, buf) - } } impl AsyncWrite for TlsStream @@ -178,17 +168,6 @@ where fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.0).poll_shutdown(cx) } - - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - Pin::new(&mut self.0).poll_write_buf(cx, buf) - } } impl tls::TlsStream for TlsStream @@ -196,7 +175,16 @@ where S: AsyncRead + AsyncWrite + Unpin, { fn channel_binding(&self) -> ChannelBinding { - // 
FIXME https://github.com/tokio-rs/tokio/issues/1383 - ChannelBinding::none() + match self.0.get_ref().tls_server_end_point().ok().flatten() { + Some(buf) => ChannelBinding::tls_server_end_point(buf), + None => ChannelBinding::none(), + } } } + +/// Set ALPN for `TlsConnectorBuilder` +/// +/// This is required when using `sslnegotiation=direct` +pub fn set_postgresql_alpn(builder: &mut TlsConnectorBuilder) { + builder.request_alpns(&["postgresql"]); +} diff --git a/postgres-native-tls/src/test.rs b/postgres-native-tls/src/test.rs index 7a50bc672..738c04bd7 100644 --- a/postgres-native-tls/src/test.rs +++ b/postgres-native-tls/src/test.rs @@ -1,11 +1,11 @@ -use futures::FutureExt; +use futures_util::FutureExt; use native_tls::{self, Certificate}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; #[cfg(feature = "runtime")] use crate::MakeTlsConnector; -use crate::TlsConnector; +use crate::{set_postgresql_alpn, TlsConnector}; async fn smoke_test(s: &str, tls: T) where @@ -42,6 +42,21 @@ async fn require() { .await; } +#[tokio::test] +async fn direct() { + let mut builder = native_tls::TlsConnector::builder(); + builder.add_root_certificate( + Certificate::from_pem(include_bytes!("../../test/server.crt")).unwrap(), + ); + set_postgresql_alpn(&mut builder); + let connector = builder.build().unwrap(); + smoke_test( + "user=ssl_user dbname=postgres sslmode=require sslnegotiation=direct", + TlsConnector::new(connector, "localhost"), + ) + .await; +} + #[tokio::test] async fn prefer() { let connector = native_tls::TlsConnector::builder() diff --git a/postgres-openssl/CHANGELOG.md b/postgres-openssl/CHANGELOG.md index 45a1bd065..33f5a127a 100644 --- a/postgres-openssl/CHANGELOG.md +++ b/postgres-openssl/CHANGELOG.md @@ -1,5 +1,23 @@ # Change Log +## v0.5.1 - 2025-02-02 + +### Added + +* Added `set_postgresql_alpn`. + +## v0.5.0 - 2020-12-25 + +### Changed + +* Upgraded to `tokio-postgres` 0.7. 
+ +## v0.4.0 - 2020-10-17 + +### Changed + +* Upgraded to `tokio-postgres` 0.6. + ## v0.3.0 - 2019-12-23 ### Changed diff --git a/postgres-openssl/Cargo.toml b/postgres-openssl/Cargo.toml index a3c9f65fa..6ebb86bef 100644 --- a/postgres-openssl/Cargo.toml +++ b/postgres-openssl/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "postgres-openssl" -version = "0.3.0" +version = "0.5.1" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "TLS support for tokio-postgres via openssl" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" @@ -16,13 +16,12 @@ default = ["runtime"] runtime = ["tokio-postgres/runtime"] [dependencies] -bytes = "0.5" -futures = "0.3" openssl = "0.10" -tokio = "0.2" -tokio-openssl = "0.4" -tokio-postgres = { version = "0.5.0", path = "../tokio-postgres", default-features = false } +tokio = "1.0" +tokio-openssl = "0.6" +tokio-postgres = { version = "0.7.11", path = "../tokio-postgres", default-features = false } [dev-dependencies] -tokio = { version = "0.2", features = ["full"] } -postgres = { version = "0.17.0", path = "../postgres" } +futures-util = "0.3" +tokio = { version = "1.0", features = ["macros", "net", "rt"] } +postgres = { version = "0.19.8", path = "../postgres" } diff --git a/postgres-openssl/LICENSE-APACHE b/postgres-openssl/LICENSE-APACHE index b9e46b0fc..965b606f3 120000 --- a/postgres-openssl/LICENSE-APACHE +++ b/postgres-openssl/LICENSE-APACHE @@ -1 +1 @@ -../tokio-postgres/LICENSE-APACHE \ No newline at end of file +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-openssl/LICENSE-MIT b/postgres-openssl/LICENSE-MIT index 162832a42..76219eb72 120000 --- a/postgres-openssl/LICENSE-MIT +++ b/postgres-openssl/LICENSE-MIT @@ -1 +1 @@ -../tokio-postgres/LICENSE-MIT \ No newline at end of file +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-openssl/src/lib.rs b/postgres-openssl/src/lib.rs index 
23a653c60..232cccd05 100644 --- a/postgres-openssl/src/lib.rs +++ b/postgres-openssl/src/lib.rs @@ -4,9 +4,11 @@ //! //! ```no_run //! use openssl::ssl::{SslConnector, SslMethod}; +//! # #[cfg(feature = "runtime")] //! use postgres_openssl::MakeTlsConnector; //! //! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "runtime")] { //! let mut builder = SslConnector::builder(SslMethod::tls())?; //! builder.set_ca_file("database_cert.pem")?; //! let connector = MakeTlsConnector::new(builder.build()); @@ -15,6 +17,7 @@ //! "host=localhost user=postgres sslmode=require", //! connector, //! ); +//! # } //! //! // ... //! # Ok(()) @@ -23,9 +26,11 @@ //! //! ```no_run //! use openssl::ssl::{SslConnector, SslMethod}; +//! # #[cfg(feature = "runtime")] //! use postgres_openssl::MakeTlsConnector; //! //! # fn main() -> Result<(), Box> { +//! # #[cfg(feature = "runtime")] { //! let mut builder = SslConnector::builder(SslMethod::tls())?; //! builder.set_ca_file("database_cert.pem")?; //! let connector = MakeTlsConnector::new(builder.build()); @@ -34,32 +39,32 @@ //! "host=localhost user=postgres sslmode=require", //! connector, //! )?; +//! # } //! //! // ... //! # Ok(()) //! # } //! 
``` -#![doc(html_root_url = "https://docs.rs/postgres-openssl/0.3")] #![warn(rust_2018_idioms, clippy::all, missing_docs)] -use bytes::{Buf, BufMut}; #[cfg(feature = "runtime")] use openssl::error::ErrorStack; use openssl::hash::MessageDigest; use openssl::nid::Nid; #[cfg(feature = "runtime")] use openssl::ssl::SslConnector; -use openssl::ssl::{ConnectConfiguration, SslRef}; -use std::fmt::Debug; +use openssl::ssl::{self, ConnectConfiguration, SslConnectorBuilder, SslRef}; +use openssl::x509::X509VerifyResult; +use std::error::Error; +use std::fmt::{self, Debug}; use std::future::Future; use std::io; -use std::mem::MaybeUninit; use std::pin::Pin; #[cfg(feature = "runtime")] use std::sync::Arc; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_openssl::{HandshakeError, SslStream}; +use tokio::io::{AsyncRead, AsyncWrite, BufReader, ReadBuf}; +use tokio_openssl::SslStream; use tokio_postgres::tls; #[cfg(feature = "runtime")] use tokio_postgres::tls::MakeTlsConnect; @@ -68,6 +73,9 @@ use tokio_postgres::tls::{ChannelBinding, TlsConnect}; #[cfg(test)] mod test; +type ConfigCallback = + dyn Fn(&mut ConnectConfiguration, &str) -> Result<(), ErrorStack> + Sync + Send; + /// A `MakeTlsConnect` implementation using the `openssl` crate. /// /// Requires the `runtime` Cargo feature (enabled by default). 
@@ -75,7 +83,7 @@ mod test; #[derive(Clone)] pub struct MakeTlsConnector { connector: SslConnector, - config: Arc Result<(), ErrorStack> + Sync + Send>, + config: Arc, } #[cfg(feature = "runtime")] @@ -133,52 +141,70 @@ impl TlsConnector { impl TlsConnect for TlsConnector where - S: AsyncRead + AsyncWrite + Unpin + Debug + 'static + Sync + Send, + S: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Stream = TlsStream; - type Error = HandshakeError; + type Error = Box; #[allow(clippy::type_complexity)] - type Future = Pin, HandshakeError>> + Send>>; + type Future = Pin, Self::Error>> + Send>>; fn connect(self, stream: S) -> Self::Future { + let stream = BufReader::with_capacity(8192, stream); let future = async move { - let stream = tokio_openssl::connect(self.ssl, &self.domain, stream).await?; - Ok(TlsStream(stream)) + let ssl = self.ssl.into_ssl(&self.domain)?; + let mut stream = SslStream::new(ssl, stream)?; + match Pin::new(&mut stream).connect().await { + Ok(()) => Ok(TlsStream(stream)), + Err(error) => Err(Box::new(ConnectError { + error, + verify_result: stream.ssl().verify_result(), + }) as _), + } }; Box::pin(future) } } +#[derive(Debug)] +struct ConnectError { + error: ssl::Error, + verify_result: X509VerifyResult, +} + +impl fmt::Display for ConnectError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.error, fmt)?; + + if self.verify_result != X509VerifyResult::OK { + fmt.write_str(": ")?; + fmt::Display::fmt(&self.verify_result, fmt)?; + } + + Ok(()) + } +} + +impl Error for ConnectError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + Some(&self.error) + } +} + /// The stream returned by `TlsConnector`. 
-pub struct TlsStream(SslStream); +pub struct TlsStream(SslStream>); impl AsyncRead for TlsStream where S: AsyncRead + AsyncWrite + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - self.0.prepare_uninitialized_buffer(buf) - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { Pin::new(&mut self.0).poll_read(cx, buf) } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - Pin::new(&mut self.0).poll_read_buf(cx, buf) - } } impl AsyncWrite for TlsStream @@ -200,17 +226,6 @@ where fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.0).poll_shutdown(cx) } - - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - { - Pin::new(&mut self.0).poll_write_buf(cx, buf) - } } impl tls::TlsStream for TlsStream @@ -235,3 +250,10 @@ fn tls_server_end_point(ssl: &SslRef) -> Option> { }; cert.digest(md).ok().map(|b| b.to_vec()) } + +/// Set ALPN for `SslConnectorBuilder` +/// +/// This is required when using `sslnegotiation=direct` +pub fn set_postgresql_alpn(builder: &mut SslConnectorBuilder) -> Result<(), ErrorStack> { + builder.set_alpn_protos(b"\x0apostgresql") +} diff --git a/postgres-openssl/src/test.rs b/postgres-openssl/src/test.rs index 15ed90ad5..66bb22641 100644 --- a/postgres-openssl/src/test.rs +++ b/postgres-openssl/src/test.rs @@ -1,4 +1,4 @@ -use futures::FutureExt; +use futures_util::FutureExt; use openssl::ssl::{SslConnector, SslMethod}; use tokio::net::TcpStream; use tokio_postgres::tls::TlsConnect; @@ -37,6 +37,19 @@ async fn require() { .await; } +#[tokio::test] +async fn direct() { + let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); + builder.set_ca_file("../test/server.crt").unwrap(); + set_postgresql_alpn(&mut 
builder).unwrap(); + let ctx = builder.build(); + smoke_test( + "user=ssl_user dbname=postgres sslmode=require sslnegotiation=direct", + TlsConnector::new(ctx.configure().unwrap(), "localhost"), + ) + .await; +} + #[tokio::test] async fn prefer() { let mut builder = SslConnector::builder(SslMethod::tls()).unwrap(); diff --git a/postgres-protocol/CHANGELOG.md b/postgres-protocol/CHANGELOG.md index b099c1ffa..25e717128 100644 --- a/postgres-protocol/CHANGELOG.md +++ b/postgres-protocol/CHANGELOG.md @@ -1,5 +1,94 @@ # Change Log +## v0.6.8 - 2025-02-02 + +### Changed + +* Upgraded `getrandom`. + +## v0.6.7 - 2024-07-21 + +### Deprecated + +* Deprecated `ErrorField::value`. + +### Added + +* Added a `Clone` implementation for `DataRowBody`. +* Added `ErrorField::value_bytes`. + +### Changed + +* Upgraded `base64`. + +## v0.6.6 - 2023-08-19 + +### Added + +* Added the `js` feature for WASM support. + +## v0.6.5 - 2023-03-27 + +### Added + +* Added `message::frontend::flush`. +* Added `DataRowBody::buffer_bytes`. + +### Changed + +* Upgraded `base64`. + +## v0.6.4 - 2022-04-03 + +### Added + +* Added parsing support for `ltree`, `lquery`, and `ltxtquery`. + +## v0.6.3 - 2021-12-10 + +### Changed + +* Upgraded `hmac`, `md-5` and `sha`. + +## v0.6.2 - 2021-09-29 + +### Changed + +* Upgraded `hmac`. + +## v0.6.1 - 2021-04-03 + +### Added + +* Added the `password` module, which can be used to hash passwords before using them in queries like `ALTER USER`. +* Added type conversions for `LSN`. + +### Changed + +* Moved from `md5` to `md-5`. + +## v0.6.0 - 2020-12-25 + +### Changed + +* Upgraded `bytes`, `hmac`, and `rand`. + +### Added + +* Added `escape::{escape_literal, escape_identifier}`. + +## v0.5.3 - 2020-10-17 + +### Changed + +* Upgraded `base64` and `hmac`. + +## v0.5.2 - 2020-07-06 + +### Changed + +* Upgraded `hmac` and `sha2`. 
+ ## v0.5.1 - 2020-03-17 ### Changed diff --git a/postgres-protocol/Cargo.toml b/postgres-protocol/Cargo.toml index ebad1aefc..9351ea14f 100644 --- a/postgres-protocol/Cargo.toml +++ b/postgres-protocol/Cargo.toml @@ -1,21 +1,26 @@ [package] name = "postgres-protocol" -version = "0.5.1" +version = "0.6.8" authors = ["Steven Fackler "] edition = "2018" description = "Low level Postgres protocol APIs" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" +[features] +default = [] +js = ["getrandom/wasm_js"] + [dependencies] -base64 = "0.12" +base64 = "0.22" byteorder = "1.0" -bytes = "0.5" +bytes = "1.0" fallible-iterator = "0.2" -hmac = "0.7" -md5 = "0.7" +hmac = "0.12" +md-5 = "0.10" memchr = "2.0" -rand = "0.7" -sha2 = "0.8" +rand = "0.9" +sha2 = "0.10" stringprep = "0.1" +getrandom = { version = "0.3", optional = true } diff --git a/postgres-protocol/LICENSE-APACHE b/postgres-protocol/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/postgres-protocol/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/postgres-protocol/LICENSE-APACHE b/postgres-protocol/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/postgres-protocol/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-protocol/LICENSE-MIT b/postgres-protocol/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/postgres-protocol/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/postgres-protocol/LICENSE-MIT b/postgres-protocol/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/postgres-protocol/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-protocol/src/authentication/mod.rs b/postgres-protocol/src/authentication/mod.rs index edacb46e7..71afa4b9b 100644 --- a/postgres-protocol/src/authentication/mod.rs +++ b/postgres-protocol/src/authentication/mod.rs @@ -1,5 +1,5 @@ //! Authentication protocol support. -use md5::Context; +use md5::{Digest, Md5}; pub mod sasl; @@ -10,14 +10,13 @@ pub mod sasl; /// `PasswordMessage` message. #[inline] pub fn md5_hash(username: &[u8], password: &[u8], salt: [u8; 4]) -> String { - let mut context = Context::new(); - context.consume(password); - context.consume(username); - let output = context.compute(); - context = Context::new(); - context.consume(format!("{:x}", output)); - context.consume(&salt); - format!("md5{:x}", context.compute()) + let mut md5 = Md5::new(); + md5.update(password); + md5.update(username); + let output = md5.finalize_reset(); + md5.update(format!("{:x}", output)); + md5.update(salt); + format!("md5{:x}", md5.finalize()) } #[cfg(test)] diff --git a/postgres-protocol/src/authentication/sasl.rs b/postgres-protocol/src/authentication/sasl.rs index af458bbaf..85a589c99 100644 --- a/postgres-protocol/src/authentication/sasl.rs +++ b/postgres-protocol/src/authentication/sasl.rs @@ -1,7 +1,11 @@ //! SASL-based authentication support. 
+use base64::display::Base64Display; +use base64::engine::general_purpose::STANDARD; +use base64::Engine; use hmac::{Hmac, Mac}; use rand::{self, Rng}; +use sha2::digest::FixedOutput; use sha2::{Digest, Sha256}; use std::fmt::Write; use std::io; @@ -31,18 +35,19 @@ fn normalize(pass: &[u8]) -> Vec { } } -fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { - let mut hmac = Hmac::::new_varkey(str).expect("HMAC is able to accept all key sizes"); - hmac.input(salt); - hmac.input(&[0, 0, 0, 1]); - let mut prev = hmac.result().code(); +pub(crate) fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] { + let mut hmac = + Hmac::::new_from_slice(str).expect("HMAC is able to accept all key sizes"); + hmac.update(salt); + hmac.update(&[0, 0, 0, 1]); + let mut prev = hmac.finalize().into_bytes(); let mut hi = prev; for _ in 1..i { - let mut hmac = Hmac::::new_varkey(str).expect("already checked above"); - hmac.input(prev.as_slice()); - prev = hmac.result().code(); + let mut hmac = Hmac::::new_from_slice(str).expect("already checked above"); + hmac.update(&prev); + prev = hmac.finalize().into_bytes(); for (hi, prev) in hi.iter_mut().zip(prev) { *hi ^= prev; @@ -131,10 +136,10 @@ impl ScramSha256 { /// Constructs a new instance which will use the provided password for authentication. 
pub fn new(password: &[u8], channel_binding: ChannelBinding) -> ScramSha256 { // rand 0.5's ThreadRng is cryptographically secure - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let nonce = (0..NONCE_LENGTH) .map(|_| { - let mut v = rng.gen_range(0x21u8, 0x7e); + let mut v = rng.random_range(0x21u8..0x7e); if v == 0x2c { v = 0x7e } @@ -175,7 +180,7 @@ impl ScramSha256 { password, channel_binding, } => (nonce, password, channel_binding), - _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")), + _ => return Err(io::Error::other("invalid SCRAM state")), }; let message = @@ -187,43 +192,48 @@ impl ScramSha256 { return Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid nonce")); } - let salt = match base64::decode(parsed.salt) { + let salt = match STANDARD.decode(parsed.salt) { Ok(salt) => salt, Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; let salted_password = hi(&password, &salt, parsed.iteration_count); - let mut hmac = Hmac::::new_varkey(&salted_password) + let mut hmac = Hmac::::new_from_slice(&salted_password) .expect("HMAC is able to accept all key sizes"); - hmac.input(b"Client Key"); - let client_key = hmac.result().code(); + hmac.update(b"Client Key"); + let client_key = hmac.finalize().into_bytes(); let mut hash = Sha256::default(); - hash.input(client_key.as_slice()); - let stored_key = hash.result(); + hash.update(client_key.as_slice()); + let stored_key = hash.finalize_fixed(); let mut cbind_input = vec![]; cbind_input.extend(channel_binding.gs2_header().as_bytes()); cbind_input.extend(channel_binding.cbind_data()); - let cbind_input = base64::encode(&cbind_input); + let cbind_input = STANDARD.encode(&cbind_input); self.message.clear(); write!(&mut self.message, "c={},r={}", cbind_input, parsed.nonce).unwrap(); let auth_message = format!("n=,r={},{},{}", client_nonce, message, self.message); - let mut hmac = - Hmac::::new_varkey(&stored_key).expect("HMAC is able to accept all key 
sizes"); - hmac.input(auth_message.as_bytes()); - let client_signature = hmac.result(); + let mut hmac = Hmac::::new_from_slice(&stored_key) + .expect("HMAC is able to accept all key sizes"); + hmac.update(auth_message.as_bytes()); + let client_signature = hmac.finalize().into_bytes(); let mut client_proof = client_key; - for (proof, signature) in client_proof.iter_mut().zip(client_signature.code()) { + for (proof, signature) in client_proof.iter_mut().zip(client_signature) { *proof ^= signature; } - write!(&mut self.message, ",p={}", base64::encode(&*client_proof)).unwrap(); + write!( + &mut self.message, + ",p={}", + Base64Display::new(&client_proof, &STANDARD) + ) + .unwrap(); self.state = State::Finish { salted_password, @@ -242,7 +252,7 @@ impl ScramSha256 { salted_password, auth_message, } => (salted_password, auth_message), - _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")), + _ => return Err(io::Error::other("invalid SCRAM state")), }; let message = @@ -252,28 +262,25 @@ impl ScramSha256 { let verifier = match parsed { ServerFinalMessage::Error(e) => { - return Err(io::Error::new( - io::ErrorKind::Other, - format!("SCRAM error: {}", e), - )); + return Err(io::Error::other(format!("SCRAM error: {}", e))); } ServerFinalMessage::Verifier(verifier) => verifier, }; - let verifier = match base64::decode(verifier) { + let verifier = match STANDARD.decode(verifier) { Ok(verifier) => verifier, Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; - let mut hmac = Hmac::::new_varkey(&salted_password) + let mut hmac = Hmac::::new_from_slice(&salted_password) .expect("HMAC is able to accept all key sizes"); - hmac.input(b"Server Key"); - let server_key = hmac.result(); + hmac.update(b"Server Key"); + let server_key = hmac.finalize().into_bytes(); - let mut hmac = Hmac::::new_varkey(&server_key.code()) + let mut hmac = Hmac::::new_from_slice(&server_key) .expect("HMAC is able to accept all key sizes"); - 
hmac.input(auth_message.as_bytes()); - hmac.verify(&verifier) + hmac.update(auth_message.as_bytes()); + hmac.verify_slice(&verifier) .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "SCRAM verification error")) } } @@ -329,10 +336,7 @@ impl<'a> Parser<'a> { } fn printable(&mut self) -> io::Result<&'a str> { - self.take_while(|c| match c { - '\x21'..='\x2b' | '\x2d'..='\x7e' => true, - _ => false, - }) + self.take_while(|c| matches!(c, '\x21'..='\x2b' | '\x2d'..='\x7e')) } fn nonce(&mut self) -> io::Result<&'a str> { @@ -342,10 +346,7 @@ impl<'a> Parser<'a> { } fn base64(&mut self) -> io::Result<&'a str> { - self.take_while(|c| match c { - 'a'..='z' | 'A'..='Z' | '0'..='9' | '/' | '+' | '=' => true, - _ => false, - }) + self.take_while(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '/' | '+' | '=')) } fn salt(&mut self) -> io::Result<&'a str> { @@ -355,10 +356,7 @@ impl<'a> Parser<'a> { } fn posit_number(&mut self) -> io::Result { - let n = self.take_while(|c| match c { - '0'..='9' => true, - _ => false, - })?; + let n = self.take_while(|c| c.is_ascii_digit())?; n.parse() .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e)) } @@ -395,10 +393,7 @@ impl<'a> Parser<'a> { } fn value(&mut self) -> io::Result<&'a str> { - self.take_while(|c| match c { - '\0' | '=' | ',' => false, - _ => true, - }) + self.take_while(|c| !matches!(c, '\0' | '=' | ',')) } fn server_error(&mut self) -> io::Result> { diff --git a/postgres-protocol/src/escape/mod.rs b/postgres-protocol/src/escape/mod.rs new file mode 100644 index 000000000..0ba7efdca --- /dev/null +++ b/postgres-protocol/src/escape/mod.rs @@ -0,0 +1,93 @@ +//! Provides functions for escaping literals and identifiers for use +//! in SQL queries. +//! +//! Prefer parameterized queries where possible. Do not escape +//! parameters in a parameterized query. + +#[cfg(test)] +mod test; + +/// Escape a literal and surround result with single quotes. Not +/// recommended in most cases. 
+/// +/// If input contains backslashes, result will be of the form ` +/// E'...'` so it is safe to use regardless of the setting of +/// standard_conforming_strings. +pub fn escape_literal(input: &str) -> String { + escape_internal(input, false) +} + +/// Escape an identifier and surround result with double quotes. +pub fn escape_identifier(input: &str) -> String { + escape_internal(input, true) +} + +// Translation of PostgreSQL libpq's PQescapeInternal(). Does not +// require a connection because input string is known to be valid +// UTF-8. +// +// Escape arbitrary strings. If as_ident is true, we escape the +// result as an identifier; if false, as a literal. The result is +// returned in a newly allocated buffer. If we fail due to an +// encoding violation or out of memory condition, we return NULL, +// storing an error message into conn. +fn escape_internal(input: &str, as_ident: bool) -> String { + let mut num_backslashes = 0; + let mut num_quotes = 0; + let quote_char = if as_ident { '"' } else { '\'' }; + + // Scan the string for characters that must be escaped. + for ch in input.chars() { + if ch == quote_char { + num_quotes += 1; + } else if ch == '\\' { + num_backslashes += 1; + } + } + + // Allocate output String. + let mut result_size = input.len() + num_quotes + 3; // two quotes, plus a NUL + if !as_ident && num_backslashes > 0 { + result_size += num_backslashes + 2; + } + + let mut output = String::with_capacity(result_size); + + // If we are escaping a literal that contains backslashes, we use + // the escape string syntax so that the result is correct under + // either value of standard_conforming_strings. We also emit a + // leading space in this case, to guard against the possibility + // that the result might be interpolated immediately following an + // identifier. + if !as_ident && num_backslashes > 0 { + output.push(' '); + output.push('E'); + } + + // Opening quote. + output.push(quote_char); + + // Use fast path if possible. 
+ // + // We've already verified that the input string is well-formed in + // the current encoding. If it contains no quotes and, in the + // case of literal-escaping, no backslashes, then we can just copy + // it directly to the output buffer, adding the necessary quotes. + // + // If not, we must rescan the input and process each character + // individually. + if num_quotes == 0 && (num_backslashes == 0 || as_ident) { + output.push_str(input); + } else { + for ch in input.chars() { + if ch == quote_char || (!as_ident && ch == '\\') { + output.push(ch); + } + output.push(ch); + } + } + + output.push(quote_char); + + output +} diff --git a/postgres-protocol/src/escape/test.rs b/postgres-protocol/src/escape/test.rs new file mode 100644 index 000000000..4816a103b --- /dev/null +++ b/postgres-protocol/src/escape/test.rs @@ -0,0 +1,17 @@ +use crate::escape::{escape_identifier, escape_literal}; + +#[test] +fn test_escape_idenifier() { + assert_eq!(escape_identifier("foo"), String::from("\"foo\"")); + assert_eq!(escape_identifier("f\\oo"), String::from("\"f\\oo\"")); + assert_eq!(escape_identifier("f'oo"), String::from("\"f'oo\"")); + assert_eq!(escape_identifier("f\"oo"), String::from("\"f\"\"oo\"")); +} + +#[test] +fn test_escape_literal() { + assert_eq!(escape_literal("foo"), String::from("'foo'")); + assert_eq!(escape_literal("f\\oo"), String::from(" E'f\\\\oo'")); + assert_eq!(escape_literal("f'oo"), String::from("'f''oo'")); + assert_eq!(escape_literal("f\"oo"), String::from("'f\"oo'")); +} diff --git a/postgres-protocol/src/lib.rs b/postgres-protocol/src/lib.rs index 9ebbcba59..e0de3b6c6 100644 --- a/postgres-protocol/src/lib.rs +++ b/postgres-protocol/src/lib.rs @@ -9,7 +9,6 @@ //! //! This library assumes that the `client_encoding` backend parameter has been //! set to `UTF8`. It will most likely not behave properly if that is not the case. 
-#![doc(html_root_url = "https://docs.rs/postgres-protocol/0.5")] #![warn(missing_docs, rust_2018_idioms, clippy::all)] use byteorder::{BigEndian, ByteOrder}; @@ -17,12 +16,17 @@ use bytes::{BufMut, BytesMut}; use std::io; pub mod authentication; +pub mod escape; pub mod message; +pub mod password; pub mod types; /// A Postgres OID. pub type Oid = u32; +/// A Postgres Log Sequence Number (LSN). +pub type Lsn = u64; + /// An enum indicating if a value is `NULL` or not. pub enum IsNull { /// The value is `NULL`. @@ -56,7 +60,7 @@ macro_rules! from_usize { impl FromUsize for $t { #[inline] fn from_usize(x: usize) -> io::Result<$t> { - if x > <$t>::max_value() as usize { + if x > <$t>::MAX as usize { Err(io::Error::new( io::ErrorKind::InvalidInput, "value too large to transmit", diff --git a/postgres-protocol/src/message/backend.rs b/postgres-protocol/src/message/backend.rs index 68b5aa6e5..013bfbb81 100644 --- a/postgres-protocol/src/message/backend.rs +++ b/postgres-protocol/src/message/backend.rs @@ -450,9 +450,9 @@ impl CopyDataBody { } pub struct CopyInResponseBody { - storage: Bytes, - len: u16, format: u8, + len: u16, + storage: Bytes, } impl CopyInResponseBody { @@ -475,7 +475,7 @@ pub struct ColumnFormats<'a> { remaining: u16, } -impl<'a> FallibleIterator for ColumnFormats<'a> { +impl FallibleIterator for ColumnFormats<'_> { type Item = u16; type Error = io::Error; @@ -504,9 +504,9 @@ impl<'a> FallibleIterator for ColumnFormats<'a> { } pub struct CopyOutResponseBody { - storage: Bytes, - len: u16, format: u8, + len: u16, + storage: Bytes, } impl CopyOutResponseBody { @@ -524,6 +524,7 @@ impl CopyOutResponseBody { } } +#[derive(Debug, Clone)] pub struct DataRowBody { storage: Bytes, len: u16, @@ -543,6 +544,11 @@ impl DataRowBody { pub fn buffer(&self) -> &[u8] { &self.storage } + + #[inline] + pub fn buffer_bytes(&self) -> &Bytes { + &self.storage + } } pub struct DataRowRanges<'a> { @@ -551,7 +557,7 @@ pub struct DataRowRanges<'a> { remaining: u16, } 
-impl<'a> FallibleIterator for DataRowRanges<'a> { +impl FallibleIterator for DataRowRanges<'_> { type Item = Option>; type Error = io::Error; @@ -581,7 +587,7 @@ impl<'a> FallibleIterator for DataRowRanges<'a> { )); } let base = self.len - self.buf.len(); - self.buf = &self.buf[len as usize..]; + self.buf = &self.buf[len..]; Ok(Some(Some(base..base + len))) } } @@ -627,7 +633,7 @@ impl<'a> FallibleIterator for ErrorFields<'a> { } let value_end = find_null(self.buf, 0)?; - let value = get_str(&self.buf[..value_end])?; + let value = &self.buf[..value_end]; self.buf = &self.buf[value_end + 1..]; Ok(Some(ErrorField { type_, value })) @@ -636,17 +642,23 @@ impl<'a> FallibleIterator for ErrorFields<'a> { pub struct ErrorField<'a> { type_: u8, - value: &'a str, + value: &'a [u8], } -impl<'a> ErrorField<'a> { +impl ErrorField<'_> { #[inline] pub fn type_(&self) -> u8 { self.type_ } #[inline] + #[deprecated(note = "use value_bytes instead", since = "0.6.7")] pub fn value(&self) -> &str { + str::from_utf8(self.value).expect("error field value contained non-UTF8 bytes") + } + + #[inline] + pub fn value_bytes(&self) -> &[u8] { self.value } } @@ -705,7 +717,7 @@ pub struct Parameters<'a> { remaining: u16, } -impl<'a> FallibleIterator for Parameters<'a> { +impl FallibleIterator for Parameters<'_> { type Item = Oid; type Error = io::Error; diff --git a/postgres-protocol/src/message/frontend.rs b/postgres-protocol/src/message/frontend.rs index 8587cd080..600f7da48 100644 --- a/postgres-protocol/src/message/frontend.rs +++ b/postgres-protocol/src/message/frontend.rs @@ -260,7 +260,8 @@ where I: IntoIterator, { write_body(buf, |buf| { - buf.put_i32(196_608); + // Postgres protocol version 3.0 (196608), written in big-endian + buf.put_i32(0x00_03_00_00); for (key, value) in parameters { write_cstr(key.as_bytes(), buf)?; write_cstr(value.as_bytes(), buf)?; @@ -270,6 +271,12 @@ where }) } +#[inline] +pub fn flush(buf: &mut BytesMut) { + buf.put_u8(b'H'); + write_body(buf, |_| Ok::<(), 
io::Error>(())).unwrap(); +} + #[inline] pub fn sync(buf: &mut BytesMut) { buf.put_u8(b'S'); diff --git a/postgres-protocol/src/password/mod.rs b/postgres-protocol/src/password/mod.rs new file mode 100644 index 000000000..445fb0c0e --- /dev/null +++ b/postgres-protocol/src/password/mod.rs @@ -0,0 +1,106 @@ +//! Functions to encrypt a password in the client. +//! +//! This is intended to be used by client applications that wish to +//! send commands like `ALTER USER joe PASSWORD 'pwd'`. The password +//! need not be sent in cleartext if it is encrypted on the client +//! side. This is good because it ensures the cleartext password won't +//! end up in logs pg_stat displays, etc. + +use crate::authentication::sasl; +use base64::display::Base64Display; +use base64::engine::general_purpose::STANDARD; +use hmac::{Hmac, Mac}; +use md5::Md5; +use rand::RngCore; +use sha2::digest::FixedOutput; +use sha2::{Digest, Sha256}; + +#[cfg(test)] +mod test; + +const SCRAM_DEFAULT_ITERATIONS: u32 = 4096; +const SCRAM_DEFAULT_SALT_LEN: usize = 16; + +/// Hash password using SCRAM-SHA-256 with a randomly-generated +/// salt. +/// +/// The client may assume the returned string doesn't contain any +/// special characters that would require escaping in an SQL command. +pub fn scram_sha_256(password: &[u8]) -> String { + let mut salt: [u8; SCRAM_DEFAULT_SALT_LEN] = [0; SCRAM_DEFAULT_SALT_LEN]; + let mut rng = rand::rng(); + rng.fill_bytes(&mut salt); + scram_sha_256_salt(password, salt) +} + +// Internal implementation of scram_sha_256 with a caller-provided +// salt. This is useful for testing. +pub(crate) fn scram_sha_256_salt(password: &[u8], salt: [u8; SCRAM_DEFAULT_SALT_LEN]) -> String { + // Prepare the password, per [RFC + // 4013](https://tools.ietf.org/html/rfc4013), if possible. + // + // Postgres treats passwords as byte strings (without embedded NUL + // bytes), but SASL expects passwords to be valid UTF-8. 
+ // + // Follow the behavior of libpq's PQencryptPasswordConn(), and + // also the backend. If the password is not valid UTF-8, or if it + // contains prohibited characters (such as non-ASCII whitespace), + // just skip the SASLprep step and use the original byte + // sequence. + let prepared: Vec = match std::str::from_utf8(password) { + Ok(password_str) => { + match stringprep::saslprep(password_str) { + Ok(p) => p.into_owned().into_bytes(), + // contains invalid characters; skip saslprep + Err(_) => Vec::from(password), + } + } + // not valid UTF-8; skip saslprep + Err(_) => Vec::from(password), + }; + + // salt password + let salted_password = sasl::hi(&prepared, &salt, SCRAM_DEFAULT_ITERATIONS); + + // client key + let mut hmac = Hmac::::new_from_slice(&salted_password) + .expect("HMAC is able to accept all key sizes"); + hmac.update(b"Client Key"); + let client_key = hmac.finalize().into_bytes(); + + // stored key + let mut hash = Sha256::default(); + hash.update(client_key.as_slice()); + let stored_key = hash.finalize_fixed(); + + // server key + let mut hmac = Hmac::::new_from_slice(&salted_password) + .expect("HMAC is able to accept all key sizes"); + hmac.update(b"Server Key"); + let server_key = hmac.finalize().into_bytes(); + + format!( + "SCRAM-SHA-256${}:{}${}:{}", + SCRAM_DEFAULT_ITERATIONS, + Base64Display::new(&salt, &STANDARD), + Base64Display::new(&stored_key, &STANDARD), + Base64Display::new(&server_key, &STANDARD) + ) +} + +/// **Not recommended, as MD5 is not considered to be secure.** +/// +/// Hash password using MD5 with the username as the salt. +/// +/// The client may assume the returned string doesn't contain any +/// special characters that would require escaping. 
+pub fn md5(password: &[u8], username: &str) -> String { + // salt password with username + let mut salted_password = Vec::from(password); + salted_password.extend_from_slice(username.as_bytes()); + + let mut hash = Md5::new(); + hash.update(&salted_password); + let digest = hash.finalize(); + format!("md5{:x}", digest) +} diff --git a/postgres-protocol/src/password/test.rs b/postgres-protocol/src/password/test.rs new file mode 100644 index 000000000..1432cb204 --- /dev/null +++ b/postgres-protocol/src/password/test.rs @@ -0,0 +1,19 @@ +use crate::password; + +#[test] +fn test_encrypt_scram_sha_256() { + // Specify the salt to make the test deterministic. Any bytes will do. + let salt: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + assert_eq!( + password::scram_sha_256_salt(b"secret", salt), + "SCRAM-SHA-256$4096:AQIDBAUGBwgJCgsMDQ4PEA==$8rrDg00OqaiWXJ7p+sCgHEIaBSHY89ZJl3mfIsf32oY=:05L1f+yZbiN8O0AnO40Og85NNRhvzTS57naKRWCcsIA=" + ); +} + +#[test] +fn test_encrypt_md5() { + assert_eq!( + password::md5(b"secret", "foo"), + "md54ab2c5d00339c4b2a4e921d2dc4edec7" + ); +} diff --git a/postgres-protocol/src/types/mod.rs b/postgres-protocol/src/types/mod.rs index 621c01cc2..03bd90799 100644 --- a/postgres-protocol/src/types/mod.rs +++ b/postgres-protocol/src/types/mod.rs @@ -8,7 +8,7 @@ use std::io::Read; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str; -use crate::{write_nullable, FromUsize, IsNull, Oid}; +use crate::{write_nullable, FromUsize, IsNull, Lsn, Oid}; #[cfg(test)] mod test; @@ -142,6 +142,22 @@ pub fn int8_from_sql(mut buf: &[u8]) -> Result Result> { + let v = buf.read_u64::()?; + if !buf.is_empty() { + return Err("invalid buffer size".into()); + } + Ok(v) +} + /// Serializes a `FLOAT4` value. 
#[inline] pub fn float4_to_sql(v: f32, buf: &mut BytesMut) { @@ -215,9 +231,9 @@ fn write_pascal_string(s: &str, buf: &mut BytesMut) -> Result<(), StdBox( - mut buf: &'a [u8], -) -> Result, StdBox> { +pub fn hstore_from_sql( + mut buf: &[u8], +) -> Result, StdBox> { let count = buf.read_i32::()?; if count < 0 { return Err("invalid entry count".into()); @@ -303,14 +319,12 @@ where /// Deserializes a `VARBIT` or `BIT` value. #[inline] -pub fn varbit_from_sql<'a>( - mut buf: &'a [u8], -) -> Result, StdBox> { +pub fn varbit_from_sql(mut buf: &[u8]) -> Result, StdBox> { let len = buf.read_i32::()?; if len < 0 { return Err("invalid varbit length: varbit < 0".into()); } - let bytes = (len as usize + 7) / 8; + let bytes = (len as usize).div_ceil(8); if buf.len() != bytes { return Err("invalid message length: varbit mismatch".into()); } @@ -492,7 +506,7 @@ where /// Deserializes an array value. #[inline] -pub fn array_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn array_from_sql(mut buf: &[u8]) -> Result, StdBox> { let dimensions = buf.read_i32::()?; if dimensions < 0 { return Err("invalid dimension count".into()); @@ -568,7 +582,7 @@ impl<'a> Array<'a> { /// An iterator over the dimensions of an array. pub struct ArrayDimensions<'a>(&'a [u8]); -impl<'a> FallibleIterator for ArrayDimensions<'a> { +impl FallibleIterator for ArrayDimensions<'_> { type Item = ArrayDimension; type Error = StdBox; @@ -722,7 +736,7 @@ pub enum RangeBound { /// Deserializes a range value. #[inline] -pub fn range_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn range_from_sql(mut buf: &[u8]) -> Result, StdBox> { let tag = buf.read_u8()?; if tag == RANGE_EMPTY { @@ -895,7 +909,7 @@ where /// Deserializes a Postgres path. #[inline] -pub fn path_from_sql<'a>(mut buf: &'a [u8]) -> Result, StdBox> { +pub fn path_from_sql(mut buf: &[u8]) -> Result, StdBox> { let closed = buf.read_u8()? 
!= 0; let points = buf.read_i32::()?; @@ -936,7 +950,7 @@ pub struct PathPoints<'a> { buf: &'a [u8], } -impl<'a> FallibleIterator for PathPoints<'a> { +impl FallibleIterator for PathPoints<'_> { type Item = Point; type Error = StdBox; @@ -1045,3 +1059,60 @@ impl Inet { self.netmask } } + +/// Serializes a Postgres ltree string +#[inline] +pub fn ltree_to_sql(v: &str, buf: &mut BytesMut) { + // A version number is prepended to an ltree string per spec + buf.put_u8(1); + // Append the rest of the query + buf.put_slice(v.as_bytes()); +} + +/// Deserialize a Postgres ltree string +#[inline] +pub fn ltree_from_sql(buf: &[u8]) -> Result<&str, StdBox> { + match buf { + // Remove the version number from the front of the ltree per spec + [1u8, rest @ ..] => Ok(str::from_utf8(rest)?), + _ => Err("ltree version 1 only supported".into()), + } +} + +/// Serializes a Postgres lquery string +#[inline] +pub fn lquery_to_sql(v: &str, buf: &mut BytesMut) { + // A version number is prepended to an lquery string per spec + buf.put_u8(1); + // Append the rest of the query + buf.put_slice(v.as_bytes()); +} + +/// Deserialize a Postgres lquery string +#[inline] +pub fn lquery_from_sql(buf: &[u8]) -> Result<&str, StdBox> { + match buf { + // Remove the version number from the front of the lquery per spec + [1u8, rest @ ..] => Ok(str::from_utf8(rest)?), + _ => Err("lquery version 1 only supported".into()), + } +} + +/// Serializes a Postgres ltxtquery string +#[inline] +pub fn ltxtquery_to_sql(v: &str, buf: &mut BytesMut) { + // A version number is prepended to an ltxtquery string per spec + buf.put_u8(1); + // Append the rest of the query + buf.put_slice(v.as_bytes()); +} + +/// Deserialize a Postgres ltxtquery string +#[inline] +pub fn ltxtquery_from_sql(buf: &[u8]) -> Result<&str, StdBox> { + match buf { + // Remove the version number from the front of the ltxtquery per spec + [1u8, rest @ ..] 
=> Ok(str::from_utf8(rest)?), + _ => Err("ltxtquery version 1 only supported".into()), + } +} diff --git a/postgres-protocol/src/types/test.rs b/postgres-protocol/src/types/test.rs index 8796ab31b..3e33b08f0 100644 --- a/postgres-protocol/src/types/test.rs +++ b/postgres-protocol/src/types/test.rs @@ -1,4 +1,4 @@ -use bytes::BytesMut; +use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; use std::collections::HashMap; @@ -6,6 +6,7 @@ use super::*; use crate::IsNull; #[test] +#[allow(clippy::bool_assert_comparison)] fn bool() { let mut buf = BytesMut::new(); bool_to_sql(true, &mut buf); @@ -113,7 +114,7 @@ fn array() { .unwrap(); let array = array_from_sql(&buf).unwrap(); - assert_eq!(array.has_nulls(), true); + assert!(array.has_nulls()); assert_eq!(array.element_type(), 10); assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); assert_eq!(array.values().collect::>().unwrap(), values); @@ -150,8 +151,92 @@ fn non_null_array() { .unwrap(); let array = array_from_sql(&buf).unwrap(); - assert_eq!(array.has_nulls(), false); + assert!(!array.has_nulls()); assert_eq!(array.element_type(), 10); assert_eq!(array.dimensions().collect::>().unwrap(), dimensions); assert_eq!(array.values().collect::>().unwrap(), values); } + +#[test] +fn ltree_sql() { + let mut query = vec![1u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + let mut buf = BytesMut::new(); + + ltree_to_sql("A.B.C", &mut buf); + + assert_eq!(query.as_slice(), buf.chunk()); +} + +#[test] +fn ltree_str() { + let mut query = vec![1u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + assert!(ltree_from_sql(query.as_slice()).is_ok()) +} + +#[test] +fn ltree_wrong_version() { + let mut query = vec![2u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + assert!(ltree_from_sql(query.as_slice()).is_err()) +} + +#[test] +fn lquery_sql() { + let mut query = vec![1u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + let mut buf = BytesMut::new(); + + lquery_to_sql("A.B.C", &mut buf); 
+ + assert_eq!(query.as_slice(), buf.chunk()); +} + +#[test] +fn lquery_str() { + let mut query = vec![1u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + assert!(lquery_from_sql(query.as_slice()).is_ok()) +} + +#[test] +fn lquery_wrong_version() { + let mut query = vec![2u8]; + query.extend_from_slice("A.B.C".as_bytes()); + + assert!(lquery_from_sql(query.as_slice()).is_err()) +} + +#[test] +fn ltxtquery_sql() { + let mut query = vec![1u8]; + query.extend_from_slice("a & b*".as_bytes()); + + let mut buf = BytesMut::new(); + + ltree_to_sql("a & b*", &mut buf); + + assert_eq!(query.as_slice(), buf.chunk()); +} + +#[test] +fn ltxtquery_str() { + let mut query = vec![1u8]; + query.extend_from_slice("a & b*".as_bytes()); + + assert!(ltree_from_sql(query.as_slice()).is_ok()) +} + +#[test] +fn ltxtquery_wrong_version() { + let mut query = vec![2u8]; + query.extend_from_slice("a & b*".as_bytes()); + + assert!(ltree_from_sql(query.as_slice()).is_err()) +} diff --git a/postgres-types/CHANGELOG.md b/postgres-types/CHANGELOG.md index f12c5c7f6..7fa6d6506 100644 --- a/postgres-types/CHANGELOG.md +++ b/postgres-types/CHANGELOG.md @@ -1,5 +1,112 @@ # Change Log +## Unreleased + +## v0.2.9 - 2025-02-02 + +### Added + +* Added support for `cidr` 0.3 via the `with-cidr-0_3` feature. + +### Fixed + +* Fixed deserialization of out of bounds inputs to `time` 0.3 types to return an error rather than panic. + +## v0.2.8 - 2024-09-15 + +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. + +## v0.2.7 - 2024-07-21 + +### Added + +* Added `Default` implementation for `Json`. +* Added a `js` feature for WASM compatibility. + +### Changed + +* `FromStr` implementation for `PgLsn` no longer allocates a `Vec` when splitting an lsn string on it's `/`. +* The `eui48-1` feature no longer enables default features of the `eui48` library. + +## v0.2.6 - 2023-08-19 + +### Fixed + +* Fixed serialization to `OIDVECTOR` and `INT2VECTOR`. 
+ +### Added + +* Removed the `'static` requirement for the `impl BorrowToSql for Box`. +* Added a `ToSql` implementation for `Cow<[u8]>`. + +## v0.2.5 - 2023-03-27 + +### Added + +* Added support for multi-range types. + +## v0.2.4 - 2022-08-20 + +### Added + +* Added `ToSql` and `FromSql` implementations for `Box<[T]>`. +* Added `ToSql` and `FromSql` implementations for `[u8; N]` via the `array-impls` feature. +* Added support for `smol_str` 0.1 via the `with-smol_str-01` feature. +* Added `ToSql::encode_format` to support text encodings of parameters. + +## v0.2.3 - 2022-04-30 + +### Added + +* Added `ToSql` and `FromSql` implementations for `Box`. +* Added `BorrowToSql` implementations for `Box` and `Box`. +* Added support for `cidr` 0.2 via the `with-cidr-02` feature. +* Added conversions between the `LTREE`, `LQUERY` and `LTXTQUERY` types and Rust strings. +* Added support for `uuid` 1.0 via the `with-uuid-1` feature. + +## v0.2.2 - 2021-09-29 + +### Added + +* Added support for `eui48` 1.0 via the `with-eui48-1` feature. +* Added `ToSql` and `FromSql` implementations for array types via the `array-impls` feature. +* Added support for `time` 0.3 via the `with-time-0_3` feature. + +## v0.2.1 - 2021-04-03 + +### Added + +* Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. +* Added the `PgLsn` type, corresponding to `PG_LSN`. + +## v0.2.0 - 2020-12-25 + +### Changed + +* Upgraded `bytes` to 1.0. + +### Removed + +* Removed support for `geo-types` 0.4. + +## v0.1.3 - 2020-10-17 + +### Added + +* Implemented `Clone`, `PartialEq`, and `Eq` for `Json`. + +### Fixed + +* Checked for overflow in `NaiveDate` and `NaiveDateTime` conversions. + +## v0.1.2 - 2020-07-03 + +### Added + +* Added support for `geo-types` 0.6. 
+ ## v0.1.1 - 2020-03-05 ### Added diff --git a/postgres-types/Cargo.toml b/postgres-types/Cargo.toml index a12f1f513..d6527f3b9 100644 --- a/postgres-types/Cargo.toml +++ b/postgres-types/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "postgres-types" -version = "0.1.1" +version = "0.2.9" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "Conversions between Rust and Postgres values" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" @@ -12,25 +12,50 @@ categories = ["database"] [features] derive = ["postgres-derive"] +array-impls = ["array-init"] +js = ["postgres-protocol/js"] with-bit-vec-0_6 = ["bit-vec-06"] +with-cidr-0_2 = ["cidr-02"] +with-cidr-0_3 = ["cidr-03"] with-chrono-0_4 = ["chrono-04"] with-eui48-0_4 = ["eui48-04"] -with-geo-types-0_4 = ["geo-types-04"] +with-eui48-1 = ["eui48-1"] +with-geo-types-0_6 = ["geo-types-06"] +with-geo-types-0_7 = ["geo-types-0_7"] +with-jiff-0_1 = ["jiff-01"] +with-jiff-0_2 = ["jiff-02"] with-serde_json-1 = ["serde-1", "serde_json-1"] +with-smol_str-01 = ["smol_str-01"] with-uuid-0_8 = ["uuid-08"] +with-uuid-1 = ["uuid-1"] with-time-0_2 = ["time-02"] +with-time-0_3 = ["time-03"] [dependencies] -bytes = "0.5" +bytes = "1.0" fallible-iterator = "0.2" -postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } -postgres-derive = { version = "0.4.0", optional = true, path = "../postgres-derive" } +postgres-protocol = { version = "0.6.8", path = "../postgres-protocol" } +postgres-derive = { version = "0.4.6", optional = true, path = "../postgres-derive" } +array-init = { version = "2", optional = true } bit-vec-06 = { version = "0.6", package = "bit-vec", optional = true } -chrono-04 = { version = "0.4", package = "chrono", optional = true } +chrono-04 = { version = "0.4.16", package = "chrono", default-features = false, features = [ + "clock", +], optional = true } +cidr-02 = { version = "0.2", package = "cidr", 
optional = true } +cidr-03 = { version = "0.3", package = "cidr", optional = true } +# eui48-04 will stop compiling and support will be removed +# See https://github.com/sfackler/rust-postgres/issues/1073 eui48-04 = { version = "0.4", package = "eui48", optional = true } -geo-types-04 = { version = "0.4", package = "geo-types", optional = true } +eui48-1 = { version = "1.0", package = "eui48", optional = true, default-features = false } +geo-types-06 = { version = "0.6", package = "geo-types", optional = true } +geo-types-0_7 = { version = "0.7", package = "geo-types", optional = true } +jiff-01 = { version = "0.1", package = "jiff", optional = true } +jiff-02 = { version = "0.2", package = "jiff", optional = true } serde-1 = { version = "1.0", package = "serde", optional = true } serde_json-1 = { version = "1.0", package = "serde_json", optional = true } uuid-08 = { version = "0.8", package = "uuid", optional = true } +uuid-1 = { version = "1.0", package = "uuid", optional = true } time-02 = { version = "0.2", package = "time", optional = true } +time-03 = { version = "0.3", package = "time", default-features = false, optional = true } +smol_str-01 = { version = "0.1.23", package = "smol_str", default-features = false, optional = true } diff --git a/postgres-types/LICENSE-APACHE b/postgres-types/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/postgres-types/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/postgres-types/LICENSE-APACHE b/postgres-types/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/postgres-types/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres-types/LICENSE-MIT b/postgres-types/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/postgres-types/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/postgres-types/LICENSE-MIT b/postgres-types/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/postgres-types/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/postgres-types/src/chrono_04.rs b/postgres-types/src/chrono_04.rs index 9bfbb786f..d599bde02 100644 --- a/postgres-types/src/chrono_04.rs +++ b/postgres-types/src/chrono_04.rs @@ -1,18 +1,25 @@ use bytes::BytesMut; -use chrono_04::{DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, Utc}; +use chrono_04::{ + DateTime, Duration, FixedOffset, Local, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc, +}; use postgres_protocol::types; use std::error::Error; use crate::{FromSql, IsNull, ToSql, Type}; fn base() -> NaiveDateTime { - NaiveDate::from_ymd(2000, 1, 1).and_hms(0, 0, 0) + NaiveDate::from_ymd_opt(2000, 1, 1) + .unwrap() + .and_hms_opt(0, 0, 0) + .unwrap() } impl<'a> FromSql<'a> for NaiveDateTime { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let t = types::timestamp_from_sql(raw)?; - Ok(base() + Duration::microseconds(t)) + base() + .checked_add_signed(Duration::microseconds(t)) + .ok_or_else(|| "value too large to decode".into()) } accepts!(TIMESTAMP); @@ -35,7 +42,7 @@ impl ToSql for NaiveDateTime { impl<'a> FromSql<'a> for DateTime { fn from_sql(type_: &Type, raw: &[u8]) -> Result, Box> { let naive = NaiveDateTime::from_sql(type_, raw)?; - Ok(DateTime::from_utc(naive, Utc)) + Ok(Utc.from_utc_datetime(&naive)) } accepts!(TIMESTAMPTZ); @@ -82,7 +89,7 @@ impl<'a> FromSql<'a> for DateTime { raw: &[u8], ) -> Result, Box> { let utc = DateTime::::from_sql(type_, raw)?; - Ok(utc.with_timezone(&FixedOffset::east(0))) + Ok(utc.with_timezone(&FixedOffset::east_opt(0).unwrap())) } accepts!(TIMESTAMPTZ); @@ -104,7 +111,10 @@ impl ToSql for DateTime { impl<'a> FromSql<'a> for NaiveDate { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let jd = types::date_from_sql(raw)?; - Ok(base().date() + 
Duration::days(i64::from(jd))) + base() + .date() + .checked_add_signed(Duration::days(i64::from(jd))) + .ok_or_else(|| "value too large to decode".into()) } accepts!(DATE); @@ -128,7 +138,7 @@ impl ToSql for NaiveDate { impl<'a> FromSql<'a> for NaiveTime { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let usec = types::time_from_sql(raw)?; - Ok(NaiveTime::from_hms(0, 0, 0) + Duration::microseconds(usec)) + Ok(NaiveTime::from_hms_opt(0, 0, 0).unwrap() + Duration::microseconds(usec)) } accepts!(TIME); @@ -136,7 +146,7 @@ impl<'a> FromSql<'a> for NaiveTime { impl ToSql for NaiveTime { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - let delta = self.signed_duration_since(NaiveTime::from_hms(0, 0, 0)); + let delta = self.signed_duration_since(NaiveTime::from_hms_opt(0, 0, 0).unwrap()); let time = match delta.num_microseconds() { Some(time) => time, None => return Err("value too large to transmit".into()), diff --git a/postgres-types/src/cidr_02.rs b/postgres-types/src/cidr_02.rs new file mode 100644 index 000000000..2de952c3c --- /dev/null +++ b/postgres-types/src/cidr_02.rs @@ -0,0 +1,44 @@ +use bytes::BytesMut; +use cidr_02::{IpCidr, IpInet}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for IpCidr { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(IpCidr::new(inet.addr(), inet.netmask())?) + } + + accepts!(CIDR); +} + +impl ToSql for IpCidr { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::inet_to_sql(self.first_address(), self.network_length(), w); + Ok(IsNull::No) + } + + accepts!(CIDR); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for IpInet { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(IpInet::new(inet.addr(), inet.netmask())?) 
+ } + + accepts!(INET); +} + +impl ToSql for IpInet { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::inet_to_sql(self.address(), self.network_length(), w); + Ok(IsNull::No) + } + + accepts!(INET); + to_sql_checked!(); +} diff --git a/postgres-types/src/cidr_03.rs b/postgres-types/src/cidr_03.rs new file mode 100644 index 000000000..6a0178711 --- /dev/null +++ b/postgres-types/src/cidr_03.rs @@ -0,0 +1,44 @@ +use bytes::BytesMut; +use cidr_03::{IpCidr, IpInet}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for IpCidr { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(IpCidr::new(inet.addr(), inet.netmask())?) + } + + accepts!(CIDR); +} + +impl ToSql for IpCidr { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::inet_to_sql(self.first_address(), self.network_length(), w); + Ok(IsNull::No) + } + + accepts!(CIDR); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for IpInet { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let inet = types::inet_from_sql(raw)?; + Ok(IpInet::new(inet.addr(), inet.netmask())?) 
+ } + + accepts!(INET); +} + +impl ToSql for IpInet { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::inet_to_sql(self.address(), self.network_length(), w); + Ok(IsNull::No) + } + + accepts!(INET); + to_sql_checked!(); +} diff --git a/postgres-types/src/eui48_1.rs b/postgres-types/src/eui48_1.rs new file mode 100644 index 000000000..4c35e63ce --- /dev/null +++ b/postgres-types/src/eui48_1.rs @@ -0,0 +1,27 @@ +use bytes::BytesMut; +use eui48_1::MacAddress; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for MacAddress { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let bytes = types::macaddr_from_sql(raw)?; + Ok(MacAddress::new(bytes)) + } + + accepts!(MACADDR); +} + +impl ToSql for MacAddress { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let mut bytes = [0; 6]; + bytes.copy_from_slice(self.as_bytes()); + types::macaddr_to_sql(bytes, w); + Ok(IsNull::No) + } + + accepts!(MACADDR); + to_sql_checked!(); +} diff --git a/postgres-types/src/geo_types_04.rs b/postgres-types/src/geo_types_06.rs similarity index 80% rename from postgres-types/src/geo_types_04.rs rename to postgres-types/src/geo_types_06.rs index eb8b958eb..0f0b14fd9 100644 --- a/postgres-types/src/geo_types_04.rs +++ b/postgres-types/src/geo_types_06.rs @@ -1,6 +1,6 @@ use bytes::BytesMut; use fallible_iterator::FallibleIterator; -use geo_types_04::{Coordinate, LineString, Point, Rect}; +use geo_types_06::{Coordinate, LineString, Point, Rect}; use postgres_protocol::types; use std::error::Error; @@ -28,16 +28,10 @@ impl ToSql for Point { impl<'a> FromSql<'a> for Rect { fn from_sql(_: &Type, raw: &[u8]) -> Result> { let rect = types::box_from_sql(raw)?; - Ok(Rect { - min: Coordinate { - x: rect.lower_left().x(), - y: rect.lower_left().y(), - }, - max: Coordinate { - x: rect.upper_right().x(), - y: rect.upper_right().y(), - }, - }) + Ok(Rect::new( + (rect.lower_left().x(), 
rect.lower_left().y()), + (rect.upper_right().x(), rect.upper_right().y()), + )) } accepts!(BOX); @@ -45,7 +39,7 @@ impl<'a> FromSql<'a> for Rect { impl ToSql for Rect { fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { - types::box_to_sql(self.min.x, self.min.y, self.max.x, self.max.y, out); + types::box_to_sql(self.min().x, self.min().y, self.max().x, self.max().y, out); Ok(IsNull::No) } diff --git a/postgres-types/src/geo_types_07.rs b/postgres-types/src/geo_types_07.rs new file mode 100644 index 000000000..bf7fa5601 --- /dev/null +++ b/postgres-types/src/geo_types_07.rs @@ -0,0 +1,72 @@ +use bytes::BytesMut; +use fallible_iterator::FallibleIterator; +use geo_types_0_7::{Coord, LineString, Point, Rect}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for Point { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let point = types::point_from_sql(raw)?; + Ok(Point::new(point.x(), point.y())) + } + + accepts!(POINT); +} + +impl ToSql for Point { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::point_to_sql(self.x(), self.y(), out); + Ok(IsNull::No) + } + + accepts!(POINT); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Rect { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let rect = types::box_from_sql(raw)?; + Ok(Rect::new( + (rect.lower_left().x(), rect.lower_left().y()), + (rect.upper_right().x(), rect.upper_right().y()), + )) + } + + accepts!(BOX); +} + +impl ToSql for Rect { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::box_to_sql(self.min().x, self.min().y, self.max().x, self.max().y, out); + Ok(IsNull::No) + } + + accepts!(BOX); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for LineString { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let path = types::path_from_sql(raw)?; + let points = path + .points() + .map(|p| Ok(Coord { x: p.x(), y: p.y() })) + .collect()?; + Ok(LineString(points)) + } + + 
accepts!(PATH); +} + +impl ToSql for LineString { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + let closed = false; // always encode an open path from LineString + types::path_to_sql(closed, self.0.iter().map(|p| (p.x, p.y)), out)?; + Ok(IsNull::No) + } + + accepts!(PATH); + to_sql_checked!(); +} diff --git a/postgres-types/src/jiff_01.rs b/postgres-types/src/jiff_01.rs new file mode 100644 index 000000000..d3215c0e6 --- /dev/null +++ b/postgres-types/src/jiff_01.rs @@ -0,0 +1,141 @@ +use bytes::BytesMut; +use jiff_01::{ + civil::{Date, DateTime, Time}, + Span, SpanRound, Timestamp, Unit, +}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +const fn base() -> DateTime { + DateTime::constant(2000, 1, 1, 0, 0, 0, 0) +} + +/// The number of seconds from the Unix epoch to 2000-01-01 00:00:00 UTC. +const PG_EPOCH: i64 = 946684800; + +fn base_ts() -> Timestamp { + Timestamp::new(PG_EPOCH, 0).unwrap() +} + +fn round_us<'a>() -> SpanRound<'a> { + SpanRound::new().largest(Unit::Microsecond) +} + +fn decode_err(_e: E) -> Box +where + E: Error, +{ + "value too large to decode".into() +} + +fn transmit_err(_e: E) -> Box +where + E: Error, +{ + "value too large to transmit".into() +} + +impl<'a> FromSql<'a> for DateTime { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIMESTAMP); +} + +impl ToSql for DateTime { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(base()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? 
+ .get_microseconds(); + types::timestamp_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMP); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Timestamp { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base_ts().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIMESTAMPTZ); +} + +impl ToSql for Timestamp { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(base_ts()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? + .get_microseconds(); + types::timestamp_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Date { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::date_from_sql(raw)?; + Span::new() + .try_days(v) + .and_then(|s| base().date().checked_add(s)) + .map_err(decode_err) + } + accepts!(DATE); +} + +impl ToSql for Date { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self.since(base().date()).map_err(transmit_err)?.get_days(); + types::date_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(DATE); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Time { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::time_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| Time::midnight().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIME); +} + +impl ToSql for Time { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(Time::midnight()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? 
+ .get_microseconds(); + types::time_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIME); + to_sql_checked!(); +} diff --git a/postgres-types/src/jiff_02.rs b/postgres-types/src/jiff_02.rs new file mode 100644 index 000000000..a736dd3eb --- /dev/null +++ b/postgres-types/src/jiff_02.rs @@ -0,0 +1,141 @@ +use bytes::BytesMut; +use jiff_02::{ + civil::{Date, DateTime, Time}, + Span, SpanRound, Timestamp, Unit, +}; +use postgres_protocol::types; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +const fn base() -> DateTime { + DateTime::constant(2000, 1, 1, 0, 0, 0, 0) +} + +/// The number of seconds from the Unix epoch to 2000-01-01 00:00:00 UTC. +const PG_EPOCH: i64 = 946684800; + +fn base_ts() -> Timestamp { + Timestamp::new(PG_EPOCH, 0).unwrap() +} + +fn round_us<'a>() -> SpanRound<'a> { + SpanRound::new().largest(Unit::Microsecond) +} + +fn decode_err(_e: E) -> Box +where + E: Error, +{ + "value too large to decode".into() +} + +fn transmit_err(_e: E) -> Box +where + E: Error, +{ + "value too large to transmit".into() +} + +impl<'a> FromSql<'a> for DateTime { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIMESTAMP); +} + +impl ToSql for DateTime { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(base()) + .and_then(|s| s.round(round_us().relative(base()))) + .map_err(transmit_err)? 
+ .get_microseconds(); + types::timestamp_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMP); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Timestamp { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::timestamp_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| base_ts().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIMESTAMPTZ); +} + +impl ToSql for Timestamp { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(base_ts()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? + .get_microseconds(); + types::timestamp_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Date { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::date_from_sql(raw)?; + Span::new() + .try_days(v) + .and_then(|s| base().date().checked_add(s)) + .map_err(decode_err) + } + accepts!(DATE); +} + +impl ToSql for Date { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self.since(base().date()).map_err(transmit_err)?.get_days(); + types::date_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(DATE); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Time { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let v = types::time_from_sql(raw)?; + Span::new() + .try_microseconds(v) + .and_then(|s| Time::midnight().checked_add(s)) + .map_err(decode_err) + } + + accepts!(TIME); +} + +impl ToSql for Time { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let v = self + .since(Time::midnight()) + .and_then(|s| s.round(round_us())) + .map_err(transmit_err)? + .get_microseconds(); + types::time_to_sql(v, w); + Ok(IsNull::No) + } + + accepts!(TIME); + to_sql_checked!(); +} diff --git a/postgres-types/src/lib.rs b/postgres-types/src/lib.rs index 1479c1264..51137b6b4 100644 --- a/postgres-types/src/lib.rs +++ b/postgres-types/src/lib.rs @@ -6,7 +6,12 @@ //! # Derive //! //! 
If the `derive` cargo feature is enabled, you can derive `ToSql` and `FromSql` implementations for custom Postgres -//! types. +//! types. Explicitly, modify your `Cargo.toml` file to include the following: +//! +//! ```toml +//! [dependencies] +//! postgres-types = { version = "0.X.X", features = ["derive"] } +//! ``` //! //! ## Enums //! @@ -50,6 +55,21 @@ //! struct SessionId(Vec); //! ``` //! +//! ## Newtypes +//! +//! The `#[postgres(transparent)]` attribute can be used on a single-field tuple struct to create a +//! Rust-only wrapper type that will use the [`ToSql`] & [`FromSql`] implementation of the inner +//! value: +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! #[postgres(transparent)] +//! struct UserId(i32); +//! ``` +//! //! ## Composites //! //! Postgres composite types correspond to structs in Rust: @@ -105,11 +125,64 @@ //! Happy, //! } //! ``` -#![doc(html_root_url = "https://docs.rs/postgres-types/0.1")] +//! +//! Alternatively, the `#[postgres(rename_all = "...")]` attribute can be used to rename all fields or variants +//! with the chosen casing convention. This will not affect the struct or enum's type name. Note that +//! `#[postgres(name = "...")]` takes precedence when used in conjunction with `#[postgres(rename_all = "...")]`: +//! +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! #[postgres(name = "mood", rename_all = "snake_case")] +//! enum Mood { +//! #[postgres(name = "ok")] +//! Ok, // ok +//! VeryHappy, // very_happy +//! } +//! ``` +//! +//! The following case conventions are supported: +//! - `"lowercase"` +//! - `"UPPERCASE"` +//! - `"PascalCase"` +//! - `"camelCase"` +//! - `"snake_case"` +//! - `"SCREAMING_SNAKE_CASE"` +//! - `"kebab-case"` +//! - `"SCREAMING-KEBAB-CASE"` +//! 
- `"Train-Case"` +//! +//! ## Allowing Enum Mismatches +//! +//! By default the generated implementation of [`ToSql`] & [`FromSql`] for enums will require an exact match of the enum +//! variants between the Rust and Postgres types. +//! To allow mismatches, the `#[postgres(allow_mismatch)]` attribute can be used on the enum definition: +//! +//! ```sql +//! CREATE TYPE mood AS ENUM ( +//! 'Sad', +//! 'Ok', +//! 'Happy' +//! ); +//! ``` +//! +//! ```rust +//! # #[cfg(feature = "derive")] +//! use postgres_types::{ToSql, FromSql}; +//! +//! # #[cfg(feature = "derive")] +//! #[derive(Debug, ToSql, FromSql)] +//! #[postgres(allow_mismatch)] +//! enum Mood { +//! Happy, +//! Meh, +//! } +//! ``` #![warn(clippy::all, rust_2018_idioms, missing_docs)] - use fallible_iterator::FallibleIterator; -use postgres_protocol; use postgres_protocol::types::{self, ArrayDimension}; use std::any::type_name; use std::borrow::Cow; @@ -131,6 +204,9 @@ use crate::type_gen::{Inner, Other}; #[doc(inline)] pub use postgres_protocol::Oid; +#[doc(inline)] +pub use pg_lsn::PgLsn; + pub use crate::special::{Date, Timestamp}; use bytes::BytesMut; @@ -145,10 +221,7 @@ const NSEC_PER_USEC: u64 = 1_000; macro_rules! accepts { ($($expected:ident),+) => ( fn accepts(ty: &$crate::Type) -> bool { - match *ty { - $($crate::Type::$expected)|+ => true, - _ => false - } + matches!(*ty, $($crate::Type::$expected)|+) } ) } @@ -159,16 +232,17 @@ macro_rules! accepts { #[macro_export] macro_rules! to_sql_checked { () => { - fn to_sql_checked(&self, - ty: &$crate::Type, - out: &mut $crate::private::BytesMut) - -> ::std::result::Result<$crate::IsNull, - Box> { + fn to_sql_checked( + &self, + ty: &$crate::Type, + out: &mut $crate::private::BytesMut, + ) -> ::std::result::Result< + $crate::IsNull, + Box, + > { $crate::__to_sql_checked(self, ty, out) } - } + }; } // WARNING: this function is not considered part of this crate's public API. 
@@ -192,21 +266,40 @@ where mod bit_vec_06; #[cfg(feature = "with-chrono-0_4")] mod chrono_04; +#[cfg(feature = "with-cidr-0_2")] +mod cidr_02; +#[cfg(feature = "with-cidr-0_3")] +mod cidr_03; #[cfg(feature = "with-eui48-0_4")] mod eui48_04; -#[cfg(feature = "with-geo-types-0_4")] -mod geo_types_04; +#[cfg(feature = "with-eui48-1")] +mod eui48_1; +#[cfg(feature = "with-geo-types-0_6")] +mod geo_types_06; +#[cfg(feature = "with-geo-types-0_7")] +mod geo_types_07; +#[cfg(feature = "with-jiff-0_1")] +mod jiff_01; +#[cfg(feature = "with-jiff-0_2")] +mod jiff_02; #[cfg(feature = "with-serde_json-1")] mod serde_json_1; +#[cfg(feature = "with-smol_str-01")] +mod smol_str_01; #[cfg(feature = "with-time-0_2")] mod time_02; +#[cfg(feature = "with-time-0_3")] +mod time_03; #[cfg(feature = "with-uuid-0_8")] mod uuid_08; +#[cfg(feature = "with-uuid-1")] +mod uuid_1; // The time::{date, time} macros produce compile errors if the crate package is renamed. #[cfg(feature = "with-time-0_2")] extern crate time_02 as time; +mod pg_lsn; #[doc(hidden)] pub mod private; mod special; @@ -287,6 +380,8 @@ pub enum Kind { Array(Type), /// A range type along with the type of its elements. Range(Type), + /// A multirange type along with the type of its elements. + Multirange(Type), /// A domain type along with its underlying type. Domain(Type), /// A composite type along with information about its fields. 
@@ -378,6 +473,7 @@ impl WrongType { /// | `f32` | REAL | /// | `f64` | DOUBLE PRECISION | /// | `&str`/`String` | VARCHAR, CHAR(n), TEXT, CITEXT, NAME, UNKNOWN | +/// | | LTREE, LQUERY, LTXTQUERY | /// | `&[u8]`/`Vec` | BYTEA | /// | `HashMap>` | HSTORE | /// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | @@ -397,10 +493,16 @@ impl WrongType { /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | +/// | `cidr::IpCidr` | CIDR | +/// | `cidr::IpInet` | INET | /// | `time::PrimitiveDateTime` | TIMESTAMP | /// | `time::OffsetDateTime` | TIMESTAMP WITH TIME ZONE | /// | `time::Date` | DATE | /// | `time::Time` | TIME | +/// | `jiff::civil::Date` | DATE | +/// | `jiff::civil::DateTime` | TIMESTAMP | +/// | `jiff::civil::Time` | TIME | +/// | `jiff::Timestamp` | TIMESTAMP WITH TIME ZONE | /// | `eui48::MacAddress` | MACADDR | /// | `geo_types::Point` | POINT | /// | `geo_types::Rect` | BOX | @@ -409,6 +511,11 @@ impl WrongType { /// | `uuid::Uuid` | UUID | /// | `bit_vec::BitVec` | BIT, VARBIT | /// | `eui48::MacAddress` | MACADDR | +/// | `cidr::InetCidr` | CIDR | +/// | `cidr::InetAddr` | INET | +/// | `smol_str::SmolStr` | VARCHAR, CHAR(n), TEXT, CITEXT, | +/// | | NAME, UNKNOWN, LTREE, LQUERY, | +/// | | LTXTQUERY | /// /// # Nullability /// @@ -418,8 +525,11 @@ impl WrongType { /// /// # Arrays /// -/// `FromSql` is implemented for `Vec` where `T` implements `FromSql`, and -/// corresponds to one-dimensional Postgres arrays. +/// `FromSql` is implemented for `Vec`, `Box<[T]>` and `[T; N]` where `T` +/// implements `FromSql`, and corresponds to one-dimensional Postgres arrays. +/// +/// **Note:** the impl for arrays only exist when the Cargo feature `array-impls` +/// is enabled. pub trait FromSql<'a>: Sized { /// Creates a new value of this type from a buffer of data of the specified /// Postgres `Type` in its binary format. 
@@ -503,6 +613,57 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Vec { } } +#[cfg(feature = "array-impls")] +impl<'a, T: FromSql<'a>, const N: usize> FromSql<'a> for [T; N] { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + let member_type = match *ty.kind() { + Kind::Array(ref member) => member, + _ => panic!("expected array type"), + }; + + let array = types::array_from_sql(raw)?; + if array.dimensions().count()? > 1 { + return Err("array contains too many dimensions".into()); + } + + let mut values = array.values(); + let out = array_init::try_array_init(|i| { + let v = values + .next()? + .ok_or_else(|| -> Box { + format!("too few elements in array (expected {}, got {})", N, i).into() + })?; + T::from_sql_nullable(member_type, v) + })?; + if values.next()?.is_some() { + return Err(format!( + "excess elements in array (expected {}, got more than that)", + N, + ) + .into()); + } + + Ok(out) + } + + fn accepts(ty: &Type) -> bool { + match *ty.kind() { + Kind::Array(ref inner) => T::accepts(inner), + _ => false, + } + } +} + +impl<'a, T: FromSql<'a>> FromSql<'a> for Box<[T]> { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + Vec::::from_sql(ty, raw).map(Vec::into_boxed_slice) + } + + fn accepts(ty: &Type) -> bool { + Vec::::accepts(ty) + } +} + impl<'a> FromSql<'a> for Vec { fn from_sql(_: &Type, raw: &'a [u8]) -> Result, Box> { Ok(types::bytea_from_sql(raw).to_owned()) @@ -520,8 +681,20 @@ impl<'a> FromSql<'a> for &'a [u8] { } impl<'a> FromSql<'a> for String { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { - types::text_from_sql(raw).map(ToString::to_string) + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + <&str as FromSql>::from_sql(ty, raw).map(ToString::to_string) + } + + fn accepts(ty: &Type) -> bool { + <&str as FromSql>::accepts(ty) + } +} + +impl<'a> FromSql<'a> for Box { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result, Box> { + <&str as FromSql>::from_sql(ty, raw) + .map(ToString::to_string) + .map(String::into_boxed_str) } fn 
accepts(ty: &Type) -> bool { @@ -530,14 +703,26 @@ impl<'a> FromSql<'a> for String { } impl<'a> FromSql<'a> for &'a str { - fn from_sql(_: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { - types::text_from_sql(raw) + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result<&'a str, Box> { + match *ty { + ref ty if ty.name() == "ltree" => types::ltree_from_sql(raw), + ref ty if ty.name() == "lquery" => types::lquery_from_sql(raw), + ref ty if ty.name() == "ltxtquery" => types::ltxtquery_from_sql(raw), + _ => types::text_from_sql(raw), + } } fn accepts(ty: &Type) -> bool { match *ty { Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true, - ref ty if ty.name() == "citext" => true, + ref ty + if (ty.name() == "citext" + || ty.name() == "ltree" + || ty.name() == "lquery" + || ty.name() == "ltxtquery") => + { + true + } _ => false, } } @@ -588,7 +773,7 @@ impl<'a> FromSql<'a> for SystemTime { let epoch = UNIX_EPOCH + Duration::from_secs(TIME_SEC_CONVERSION); let negative = time < 0; - let time = time.abs() as u64; + let time = time.unsigned_abs(); let secs = time / USEC_PER_SEC; let nsec = (time % USEC_PER_SEC) * NSEC_PER_USEC; @@ -641,7 +826,8 @@ pub enum IsNull { /// | `f32` | REAL | /// | `f64` | DOUBLE PRECISION | /// | `&str`/`String` | VARCHAR, CHAR(n), TEXT, CITEXT, NAME | -/// | `&[u8]`/`Vec` | BYTEA | +/// | | LTREE, LQUERY, LTXTQUERY | +/// | `&[u8]`/`Vec`/`[u8; N]` | BYTEA | /// | `HashMap>` | HSTORE | /// | `SystemTime` | TIMESTAMP, TIMESTAMP WITH TIME ZONE | /// | `IpAddr` | INET | @@ -660,6 +846,8 @@ pub enum IsNull { /// | `chrono::DateTime` | TIMESTAMP WITH TIME ZONE | /// | `chrono::NaiveDate` | DATE | /// | `chrono::NaiveTime` | TIME | +/// | `cidr::IpCidr` | CIDR | +/// | `cidr::IpInet` | INET | /// | `time::PrimitiveDateTime` | TIMESTAMP | /// | `time::OffsetDateTime` | TIMESTAMP WITH TIME ZONE | /// | `time::Date` | DATE | @@ -681,8 +869,12 @@ pub enum IsNull { /// /// # Arrays /// -/// `ToSql` is implemented for `Vec` and `&[T]` 
where `T` implements `ToSql`, -/// and corresponds to one-dimensional Postgres arrays with an index offset of 1. +/// `ToSql` is implemented for `[u8; N]`, `Vec`, `&[T]`, `Box<[T]>` and `[T; N]` +/// where `T` implements `ToSql` and `N` is const usize, and corresponds to one-dimensional +/// Postgres arrays with an index offset of 1. +/// +/// **Note:** the impl for arrays only exist when the Cargo feature `array-impls` +/// is enabled. pub trait ToSql: fmt::Debug { /// Converts the value of `self` into the binary format of the specified /// Postgres `Type`, appending it to `out`. @@ -712,9 +904,25 @@ pub trait ToSql: fmt::Debug { ty: &Type, out: &mut BytesMut, ) -> Result>; + + /// Specify the encode format + fn encode_format(&self, _ty: &Type) -> Format { + Format::Binary + } } -impl<'a, T> ToSql for &'a T +/// Supported Postgres message format types +/// +/// Using Text format in a message assumes a Postgres `SERVER_ENCODING` of `UTF8` +#[derive(Clone, Copy, Debug)] +pub enum Format { + /// Text format (UTF-8) + Text, + /// Compact, typed binary format + Binary, +} + +impl ToSql for &T where T: ToSql, { @@ -730,6 +938,10 @@ where T::accepts(ty) } + fn encode_format(&self, ty: &Type) -> Format { + (*self).encode_format(ty) + } + to_sql_checked!(); } @@ -749,19 +961,32 @@ impl ToSql for Option { ::accepts(ty) } + fn encode_format(&self, ty: &Type) -> Format { + match self { + Some(val) => val.encode_format(ty), + None => Format::Binary, + } + } + to_sql_checked!(); } -impl<'a, T: ToSql> ToSql for &'a [T] { +impl ToSql for &[T] { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { let member_type = match *ty.kind() { Kind::Array(ref member) => member, _ => panic!("expected array type"), }; + // Arrays are normally one indexed by default but oidvector and int2vector *require* zero indexing + let lower_bound = match *ty { + Type::OID_VECTOR | Type::INT2_VECTOR => 0, + _ => 1, + }; + let dimension = ArrayDimension { len: downcast(self.len())?, - lower_bound: 1, 
+ lower_bound, }; types::array_to_sql( @@ -787,9 +1012,9 @@ impl<'a, T: ToSql> ToSql for &'a [T] { to_sql_checked!(); } -impl<'a> ToSql for &'a [u8] { +impl ToSql for &[u8] { fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - types::bytea_to_sql(*self, w); + types::bytea_to_sql(self, w); Ok(IsNull::No) } @@ -798,6 +1023,31 @@ impl<'a> ToSql for &'a [u8] { to_sql_checked!(); } +#[cfg(feature = "array-impls")] +impl ToSql for [u8; N] { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::bytea_to_sql(&self[..], w); + Ok(IsNull::No) + } + + accepts!(BYTEA); + + to_sql_checked!(); +} + +#[cfg(feature = "array-impls")] +impl ToSql for [T; N] { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&[T] as ToSql>::to_sql(&&self[..], ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&[T] as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + impl ToSql for Vec { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[T] as ToSql>::to_sql(&&**self, ty, w) @@ -810,6 +1060,30 @@ impl ToSql for Vec { to_sql_checked!(); } +impl ToSql for Box<[T]> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&[T] as ToSql>::to_sql(&&**self, ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&[T] as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + +impl ToSql for Cow<'_, [u8]> { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&[u8] as ToSql>::to_sql(&self.as_ref(), ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&[u8] as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + impl ToSql for Vec { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { <&[u8] as ToSql>::to_sql(&&**self, ty, w) @@ -822,26 +1096,30 @@ impl ToSql for Vec { to_sql_checked!(); } -impl<'a> ToSql for &'a str { - fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { - types::text_to_sql(*self, w); +impl ToSql for &str { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + match ty.name() { + "ltree" => types::ltree_to_sql(self, 
w), + "lquery" => types::lquery_to_sql(self, w), + "ltxtquery" => types::ltxtquery_to_sql(self, w), + _ => types::text_to_sql(self, w), + } Ok(IsNull::No) } fn accepts(ty: &Type) -> bool { - match *ty { - Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN => true, - ref ty if ty.name() == "citext" => true, - _ => false, - } + matches!( + *ty, + Type::VARCHAR | Type::TEXT | Type::BPCHAR | Type::NAME | Type::UNKNOWN + ) || matches!(ty.name(), "citext" | "ltree" | "lquery" | "ltxtquery") } to_sql_checked!(); } -impl<'a> ToSql for Cow<'a, str> { +impl ToSql for Cow<'_, str> { fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { - <&str as ToSql>::to_sql(&&self.as_ref(), ty, w) + <&str as ToSql>::to_sql(&self.as_ref(), ty, w) } fn accepts(ty: &Type) -> bool { @@ -863,6 +1141,18 @@ impl ToSql for String { to_sql_checked!(); } +impl ToSql for Box { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&str as ToSql>::to_sql(&&**self, ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&str as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} + macro_rules! simple_to { ($t:ty, $f:ident, $($expected:ident),+) => { impl ToSql for $t { @@ -946,9 +1236,72 @@ impl ToSql for IpAddr { } fn downcast(len: usize) -> Result> { - if len > i32::max_value() as usize { + if len > i32::MAX as usize { Err("value too large to transmit".into()) } else { Ok(len as i32) } } + +mod sealed { + pub trait Sealed {} +} + +/// A trait used by clients to abstract over `&dyn ToSql` and `T: ToSql`. +/// +/// This cannot be implemented outside of this crate. +pub trait BorrowToSql: sealed::Sealed { + /// Returns a reference to `self` as a `ToSql` trait object. 
+ fn borrow_to_sql(&self) -> &dyn ToSql; +} + +impl sealed::Sealed for &dyn ToSql {} + +impl BorrowToSql for &dyn ToSql { + #[inline] + fn borrow_to_sql(&self) -> &dyn ToSql { + *self + } +} + +impl sealed::Sealed for Box {} + +impl BorrowToSql for Box { + #[inline] + fn borrow_to_sql(&self) -> &dyn ToSql { + self.as_ref() + } +} + +impl sealed::Sealed for Box {} +impl BorrowToSql for Box { + #[inline] + fn borrow_to_sql(&self) -> &dyn ToSql { + self.as_ref() + } +} + +impl sealed::Sealed for &(dyn ToSql + Sync) {} + +/// In async contexts it is sometimes necessary to have the additional +/// Sync requirement on parameters for queries since this enables the +/// resulting Futures to be Send, hence usable in, e.g., tokio::spawn. +/// This instance is provided for those cases. +impl BorrowToSql for &(dyn ToSql + Sync) { + #[inline] + fn borrow_to_sql(&self) -> &dyn ToSql { + *self + } +} + +impl sealed::Sealed for T where T: ToSql {} + +impl BorrowToSql for T +where + T: ToSql, +{ + #[inline] + fn borrow_to_sql(&self) -> &dyn ToSql { + self + } +} diff --git a/postgres-types/src/pg_lsn.rs b/postgres-types/src/pg_lsn.rs new file mode 100644 index 000000000..f339f9689 --- /dev/null +++ b/postgres-types/src/pg_lsn.rs @@ -0,0 +1,77 @@ +//! Log Sequence Number (LSN) type for PostgreSQL Write-Ahead Log +//! (WAL), also known as the transaction log. + +use bytes::BytesMut; +use postgres_protocol::types; +use std::error::Error; +use std::fmt; +use std::str::FromStr; + +use crate::{FromSql, IsNull, ToSql, Type}; + +/// Postgres `PG_LSN` type. +#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)] +pub struct PgLsn(u64); + +/// Error parsing LSN. 
+#[derive(Debug)] +pub struct ParseLsnError(()); + +impl From for PgLsn { + fn from(lsn_u64: u64) -> Self { + PgLsn(lsn_u64) + } +} + +impl From for u64 { + fn from(lsn: PgLsn) -> u64 { + lsn.0 + } +} + +impl FromStr for PgLsn { + type Err = ParseLsnError; + + fn from_str(lsn_str: &str) -> Result { + let Some((split_hi, split_lo)) = lsn_str.split_once('/') else { + return Err(ParseLsnError(())); + }; + let (hi, lo) = ( + u64::from_str_radix(split_hi, 16).map_err(|_| ParseLsnError(()))?, + u64::from_str_radix(split_lo, 16).map_err(|_| ParseLsnError(()))?, + ); + Ok(PgLsn((hi << 32) | lo)) + } +} + +impl fmt::Display for PgLsn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:X}/{:X}", self.0 >> 32, self.0 & 0x00000000ffffffff) + } +} + +impl fmt::Debug for PgLsn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("{}", self)) + } +} + +impl<'a> FromSql<'a> for PgLsn { + fn from_sql(_: &Type, raw: &'a [u8]) -> Result> { + let v = types::lsn_from_sql(raw)?; + Ok(v.into()) + } + + accepts!(PG_LSN); +} + +impl ToSql for PgLsn { + fn to_sql(&self, _: &Type, out: &mut BytesMut) -> Result> { + types::lsn_to_sql((*self).into(), out); + Ok(IsNull::No) + } + + accepts!(PG_LSN); + + to_sql_checked!(); +} diff --git a/postgres-types/src/serde_json_1.rs b/postgres-types/src/serde_json_1.rs index e5183d3f5..715c33f98 100644 --- a/postgres-types/src/serde_json_1.rs +++ b/postgres-types/src/serde_json_1.rs @@ -1,5 +1,4 @@ use crate::{FromSql, IsNull, ToSql, Type}; -use bytes::buf::BufMutExt; use bytes::{BufMut, BytesMut}; use serde_1::{Deserialize, Serialize}; use serde_json_1::Value; @@ -8,7 +7,7 @@ use std::fmt::Debug; use std::io::Read; /// A wrapper type to allow arbitrary `Serialize`/`Deserialize` types to convert to Postgres JSON values. 
-#[derive(Debug)] +#[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct Json(pub T); impl<'a, T> FromSql<'a> for Json diff --git a/postgres-types/src/smol_str_01.rs b/postgres-types/src/smol_str_01.rs new file mode 100644 index 000000000..a0d024ce2 --- /dev/null +++ b/postgres-types/src/smol_str_01.rs @@ -0,0 +1,27 @@ +use bytes::BytesMut; +use smol_str_01::SmolStr; +use std::error::Error; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for SmolStr { + fn from_sql(ty: &Type, raw: &'a [u8]) -> Result> { + <&str as FromSql>::from_sql(ty, raw).map(SmolStr::from) + } + + fn accepts(ty: &Type) -> bool { + <&str as FromSql>::accepts(ty) + } +} + +impl ToSql for SmolStr { + fn to_sql(&self, ty: &Type, w: &mut BytesMut) -> Result> { + <&str as ToSql>::to_sql(&&**self, ty, w) + } + + fn accepts(ty: &Type) -> bool { + <&str as ToSql>::accepts(ty) + } + + to_sql_checked!(); +} diff --git a/postgres-types/src/special.rs b/postgres-types/src/special.rs index 5a2d7bc08..d8541bf0e 100644 --- a/postgres-types/src/special.rs +++ b/postgres-types/src/special.rs @@ -1,12 +1,11 @@ use bytes::BytesMut; use postgres_protocol::types; use std::error::Error; -use std::{i32, i64}; use crate::{FromSql, IsNull, ToSql, Type}; /// A wrapper that can be used to represent infinity with `Type::Date` types. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Date { /// Represents `infinity`, a date that is later than all other dates. PosInfinity, @@ -55,7 +54,7 @@ impl ToSql for Date { /// A wrapper that can be used to represent infinity with `Type::Timestamp` and `Type::Timestamptz` /// types. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Timestamp { /// Represents `infinity`, a timestamp that is later than all other timestamps. 
PosInfinity, @@ -75,10 +74,7 @@ impl<'a, T: FromSql<'a>> FromSql<'a> for Timestamp { } fn accepts(ty: &Type) -> bool { - match *ty { - Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty) => true, - _ => false, - } + matches!(*ty, Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty)) } } @@ -99,10 +95,7 @@ impl ToSql for Timestamp { } fn accepts(ty: &Type) -> bool { - match *ty { - Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty) => true, - _ => false, - } + matches!(*ty, Type::TIMESTAMP | Type::TIMESTAMPTZ if T::accepts(ty)) } to_sql_checked!(); diff --git a/postgres-types/src/time_03.rs b/postgres-types/src/time_03.rs new file mode 100644 index 000000000..4deea663f --- /dev/null +++ b/postgres-types/src/time_03.rs @@ -0,0 +1,113 @@ +use bytes::BytesMut; +use postgres_protocol::types; +use std::convert::TryFrom; +use std::error::Error; +use time_03::{Date, Duration, OffsetDateTime, PrimitiveDateTime, Time, UtcOffset}; + +use crate::{FromSql, IsNull, ToSql, Type}; + +fn base() -> PrimitiveDateTime { + PrimitiveDateTime::new(Date::from_ordinal_date(2000, 1).unwrap(), Time::MIDNIGHT) +} + +impl<'a> FromSql<'a> for PrimitiveDateTime { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let t = types::timestamp_from_sql(raw)?; + Ok(base() + .checked_add(Duration::microseconds(t)) + .ok_or("value too large to decode")?) 
+ } + + accepts!(TIMESTAMP); +} + +impl ToSql for PrimitiveDateTime { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let time = match i64::try_from((*self - base()).whole_microseconds()) { + Ok(time) => time, + Err(_) => return Err("value too large to transmit".into()), + }; + types::timestamp_to_sql(time, w); + Ok(IsNull::No) + } + + accepts!(TIMESTAMP); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for OffsetDateTime { + fn from_sql(type_: &Type, raw: &[u8]) -> Result> { + let primitive = PrimitiveDateTime::from_sql(type_, raw)?; + Ok(primitive.assume_utc()) + } + + accepts!(TIMESTAMPTZ); +} + +impl ToSql for OffsetDateTime { + fn to_sql( + &self, + type_: &Type, + w: &mut BytesMut, + ) -> Result> { + let utc_datetime = self.to_offset(UtcOffset::UTC); + let date = utc_datetime.date(); + let time = utc_datetime.time(); + let primitive = PrimitiveDateTime::new(date, time); + primitive.to_sql(type_, w) + } + + accepts!(TIMESTAMPTZ); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Date { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let jd = types::date_from_sql(raw)?; + Ok(base() + .date() + .checked_add(Duration::days(i64::from(jd))) + .ok_or("value too large to decode")?) 
+ } + + accepts!(DATE); +} + +impl ToSql for Date { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let jd = (*self - base().date()).whole_days(); + if jd > i64::from(i32::max_value()) || jd < i64::from(i32::min_value()) { + return Err("value too large to transmit".into()); + } + + types::date_to_sql(jd as i32, w); + Ok(IsNull::No) + } + + accepts!(DATE); + to_sql_checked!(); +} + +impl<'a> FromSql<'a> for Time { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let usec = types::time_from_sql(raw)?; + Ok(Time::MIDNIGHT + Duration::microseconds(usec)) + } + + accepts!(TIME); +} + +impl ToSql for Time { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + let delta = *self - Time::MIDNIGHT; + let time = match i64::try_from(delta.whole_microseconds()) { + Ok(time) => time, + Err(_) => return Err("value too large to transmit".into()), + }; + types::time_to_sql(time, w); + Ok(IsNull::No) + } + + accepts!(TIME); + to_sql_checked!(); +} diff --git a/postgres-types/src/type_gen.rs b/postgres-types/src/type_gen.rs index 8156ccfa5..a1bc3f85c 100644 --- a/postgres-types/src/type_gen.rs +++ b/postgres-types/src/type_gen.rs @@ -35,6 +35,7 @@ pub enum Inner { PgNodeTree, JsonArray, TableAmHandler, + Xid8Array, IndexAmHandler, Point, Lseg, @@ -125,7 +126,6 @@ pub enum Inner { Trigger, LanguageHandler, Internal, - Opaque, Anyelement, RecordArray, Anynonarray, @@ -172,7 +172,32 @@ pub enum Inner { RegnamespaceArray, Regrole, RegroleArray, + Regcollation, + RegcollationArray, + Int4multiRange, + NummultiRange, + TsmultiRange, + TstzmultiRange, + DatemultiRange, + Int8multiRange, + AnymultiRange, + AnycompatiblemultiRange, + PgBrinBloomSummary, + PgBrinMinmaxMultiSummary, PgMcvList, + PgSnapshot, + PgSnapshotArray, + Xid8, + Anycompatible, + Anycompatiblearray, + Anycompatiblenonarray, + AnycompatibleRange, + Int4multiRangeArray, + NummultiRangeArray, + TsmultiRangeArray, + TstzmultiRangeArray, + DatemultiRangeArray, + Int8multiRangeArray, Other(Arc), } @@ 
-201,6 +226,7 @@ impl Inner { 194 => Some(Inner::PgNodeTree), 199 => Some(Inner::JsonArray), 269 => Some(Inner::TableAmHandler), + 271 => Some(Inner::Xid8Array), 325 => Some(Inner::IndexAmHandler), 600 => Some(Inner::Point), 601 => Some(Inner::Lseg), @@ -291,7 +317,6 @@ impl Inner { 2279 => Some(Inner::Trigger), 2280 => Some(Inner::LanguageHandler), 2281 => Some(Inner::Internal), - 2282 => Some(Inner::Opaque), 2283 => Some(Inner::Anyelement), 2287 => Some(Inner::RecordArray), 2776 => Some(Inner::Anynonarray), @@ -338,7 +363,32 @@ impl Inner { 4090 => Some(Inner::RegnamespaceArray), 4096 => Some(Inner::Regrole), 4097 => Some(Inner::RegroleArray), + 4191 => Some(Inner::Regcollation), + 4192 => Some(Inner::RegcollationArray), + 4451 => Some(Inner::Int4multiRange), + 4532 => Some(Inner::NummultiRange), + 4533 => Some(Inner::TsmultiRange), + 4534 => Some(Inner::TstzmultiRange), + 4535 => Some(Inner::DatemultiRange), + 4536 => Some(Inner::Int8multiRange), + 4537 => Some(Inner::AnymultiRange), + 4538 => Some(Inner::AnycompatiblemultiRange), + 4600 => Some(Inner::PgBrinBloomSummary), + 4601 => Some(Inner::PgBrinMinmaxMultiSummary), 5017 => Some(Inner::PgMcvList), + 5038 => Some(Inner::PgSnapshot), + 5039 => Some(Inner::PgSnapshotArray), + 5069 => Some(Inner::Xid8), + 5077 => Some(Inner::Anycompatible), + 5078 => Some(Inner::Anycompatiblearray), + 5079 => Some(Inner::Anycompatiblenonarray), + 5080 => Some(Inner::AnycompatibleRange), + 6150 => Some(Inner::Int4multiRangeArray), + 6151 => Some(Inner::NummultiRangeArray), + 6152 => Some(Inner::TsmultiRangeArray), + 6153 => Some(Inner::TstzmultiRangeArray), + 6155 => Some(Inner::DatemultiRangeArray), + 6157 => Some(Inner::Int8multiRangeArray), _ => None, } } @@ -367,6 +417,7 @@ impl Inner { Inner::PgNodeTree => 194, Inner::JsonArray => 199, Inner::TableAmHandler => 269, + Inner::Xid8Array => 271, Inner::IndexAmHandler => 325, Inner::Point => 600, Inner::Lseg => 601, @@ -457,7 +508,6 @@ impl Inner { Inner::Trigger => 2279, 
Inner::LanguageHandler => 2280, Inner::Internal => 2281, - Inner::Opaque => 2282, Inner::Anyelement => 2283, Inner::RecordArray => 2287, Inner::Anynonarray => 2776, @@ -504,7 +554,32 @@ impl Inner { Inner::RegnamespaceArray => 4090, Inner::Regrole => 4096, Inner::RegroleArray => 4097, + Inner::Regcollation => 4191, + Inner::RegcollationArray => 4192, + Inner::Int4multiRange => 4451, + Inner::NummultiRange => 4532, + Inner::TsmultiRange => 4533, + Inner::TstzmultiRange => 4534, + Inner::DatemultiRange => 4535, + Inner::Int8multiRange => 4536, + Inner::AnymultiRange => 4537, + Inner::AnycompatiblemultiRange => 4538, + Inner::PgBrinBloomSummary => 4600, + Inner::PgBrinMinmaxMultiSummary => 4601, Inner::PgMcvList => 5017, + Inner::PgSnapshot => 5038, + Inner::PgSnapshotArray => 5039, + Inner::Xid8 => 5069, + Inner::Anycompatible => 5077, + Inner::Anycompatiblearray => 5078, + Inner::Anycompatiblenonarray => 5079, + Inner::AnycompatibleRange => 5080, + Inner::Int4multiRangeArray => 6150, + Inner::NummultiRangeArray => 6151, + Inner::TsmultiRangeArray => 6152, + Inner::TstzmultiRangeArray => 6153, + Inner::DatemultiRangeArray => 6155, + Inner::Int8multiRangeArray => 6157, Inner::Other(ref u) => u.oid, } } @@ -533,6 +608,7 @@ impl Inner { Inner::PgNodeTree => &Kind::Simple, Inner::JsonArray => &Kind::Array(Type(Inner::Json)), Inner::TableAmHandler => &Kind::Pseudo, + Inner::Xid8Array => &Kind::Array(Type(Inner::Xid8)), Inner::IndexAmHandler => &Kind::Pseudo, Inner::Point => &Kind::Simple, Inner::Lseg => &Kind::Simple, @@ -623,7 +699,6 @@ impl Inner { Inner::Trigger => &Kind::Pseudo, Inner::LanguageHandler => &Kind::Pseudo, Inner::Internal => &Kind::Pseudo, - Inner::Opaque => &Kind::Pseudo, Inner::Anyelement => &Kind::Pseudo, Inner::RecordArray => &Kind::Pseudo, Inner::Anynonarray => &Kind::Pseudo, @@ -670,7 +745,32 @@ impl Inner { Inner::RegnamespaceArray => &Kind::Array(Type(Inner::Regnamespace)), Inner::Regrole => &Kind::Simple, Inner::RegroleArray => 
&Kind::Array(Type(Inner::Regrole)), + Inner::Regcollation => &Kind::Simple, + Inner::RegcollationArray => &Kind::Array(Type(Inner::Regcollation)), + Inner::Int4multiRange => &Kind::Multirange(Type(Inner::Int4)), + Inner::NummultiRange => &Kind::Multirange(Type(Inner::Numeric)), + Inner::TsmultiRange => &Kind::Multirange(Type(Inner::Timestamp)), + Inner::TstzmultiRange => &Kind::Multirange(Type(Inner::Timestamptz)), + Inner::DatemultiRange => &Kind::Multirange(Type(Inner::Date)), + Inner::Int8multiRange => &Kind::Multirange(Type(Inner::Int8)), + Inner::AnymultiRange => &Kind::Pseudo, + Inner::AnycompatiblemultiRange => &Kind::Pseudo, + Inner::PgBrinBloomSummary => &Kind::Simple, + Inner::PgBrinMinmaxMultiSummary => &Kind::Simple, Inner::PgMcvList => &Kind::Simple, + Inner::PgSnapshot => &Kind::Simple, + Inner::PgSnapshotArray => &Kind::Array(Type(Inner::PgSnapshot)), + Inner::Xid8 => &Kind::Simple, + Inner::Anycompatible => &Kind::Pseudo, + Inner::Anycompatiblearray => &Kind::Pseudo, + Inner::Anycompatiblenonarray => &Kind::Pseudo, + Inner::AnycompatibleRange => &Kind::Pseudo, + Inner::Int4multiRangeArray => &Kind::Array(Type(Inner::Int4multiRange)), + Inner::NummultiRangeArray => &Kind::Array(Type(Inner::NummultiRange)), + Inner::TsmultiRangeArray => &Kind::Array(Type(Inner::TsmultiRange)), + Inner::TstzmultiRangeArray => &Kind::Array(Type(Inner::TstzmultiRange)), + Inner::DatemultiRangeArray => &Kind::Array(Type(Inner::DatemultiRange)), + Inner::Int8multiRangeArray => &Kind::Array(Type(Inner::Int8multiRange)), Inner::Other(ref u) => &u.kind, } } @@ -699,6 +799,7 @@ impl Inner { Inner::PgNodeTree => "pg_node_tree", Inner::JsonArray => "_json", Inner::TableAmHandler => "table_am_handler", + Inner::Xid8Array => "_xid8", Inner::IndexAmHandler => "index_am_handler", Inner::Point => "point", Inner::Lseg => "lseg", @@ -789,7 +890,6 @@ impl Inner { Inner::Trigger => "trigger", Inner::LanguageHandler => "language_handler", Inner::Internal => "internal", - Inner::Opaque => 
"opaque", Inner::Anyelement => "anyelement", Inner::RecordArray => "_record", Inner::Anynonarray => "anynonarray", @@ -836,7 +936,32 @@ impl Inner { Inner::RegnamespaceArray => "_regnamespace", Inner::Regrole => "regrole", Inner::RegroleArray => "_regrole", + Inner::Regcollation => "regcollation", + Inner::RegcollationArray => "_regcollation", + Inner::Int4multiRange => "int4multirange", + Inner::NummultiRange => "nummultirange", + Inner::TsmultiRange => "tsmultirange", + Inner::TstzmultiRange => "tstzmultirange", + Inner::DatemultiRange => "datemultirange", + Inner::Int8multiRange => "int8multirange", + Inner::AnymultiRange => "anymultirange", + Inner::AnycompatiblemultiRange => "anycompatiblemultirange", + Inner::PgBrinBloomSummary => "pg_brin_bloom_summary", + Inner::PgBrinMinmaxMultiSummary => "pg_brin_minmax_multi_summary", Inner::PgMcvList => "pg_mcv_list", + Inner::PgSnapshot => "pg_snapshot", + Inner::PgSnapshotArray => "_pg_snapshot", + Inner::Xid8 => "xid8", + Inner::Anycompatible => "anycompatible", + Inner::Anycompatiblearray => "anycompatiblearray", + Inner::Anycompatiblenonarray => "anycompatiblenonarray", + Inner::AnycompatibleRange => "anycompatiblerange", + Inner::Int4multiRangeArray => "_int4multirange", + Inner::NummultiRangeArray => "_nummultirange", + Inner::TsmultiRangeArray => "_tsmultirange", + Inner::TstzmultiRangeArray => "_tstzmultirange", + Inner::DatemultiRangeArray => "_datemultirange", + Inner::Int8multiRangeArray => "_int8multirange", Inner::Other(ref u) => &u.name, } } @@ -908,6 +1033,9 @@ impl Type { /// TABLE_AM_HANDLER pub const TABLE_AM_HANDLER: Type = Type(Inner::TableAmHandler); + /// XID8[] + pub const XID8_ARRAY: Type = Type(Inner::Xid8Array); + /// INDEX_AM_HANDLER - pseudo-type for the result of an index AM handler function pub const INDEX_AM_HANDLER: Type = Type(Inner::IndexAmHandler); @@ -1178,9 +1306,6 @@ impl Type { /// INTERNAL - pseudo-type representing an internal data structure pub const INTERNAL: Type = 
Type(Inner::Internal); - /// OPAQUE - obsolete, deprecated pseudo-type - pub const OPAQUE: Type = Type(Inner::Opaque); - /// ANYELEMENT - pseudo-type representing a polymorphic base type pub const ANYELEMENT: Type = Type(Inner::Anyelement); @@ -1259,7 +1384,7 @@ impl Type { /// JSONB[] pub const JSONB_ARRAY: Type = Type(Inner::JsonbArray); - /// ANYRANGE - pseudo-type representing a polymorphic base type that is a range + /// ANYRANGE - pseudo-type representing a range over a polymorphic base type pub const ANY_RANGE: Type = Type(Inner::AnyRange); /// EVENT_TRIGGER - pseudo-type for the result of an event trigger function @@ -1319,6 +1444,81 @@ impl Type { /// REGROLE[] pub const REGROLE_ARRAY: Type = Type(Inner::RegroleArray); + /// REGCOLLATION - registered collation + pub const REGCOLLATION: Type = Type(Inner::Regcollation); + + /// REGCOLLATION[] + pub const REGCOLLATION_ARRAY: Type = Type(Inner::RegcollationArray); + + /// INT4MULTIRANGE - multirange of integers + pub const INT4MULTI_RANGE: Type = Type(Inner::Int4multiRange); + + /// NUMMULTIRANGE - multirange of numerics + pub const NUMMULTI_RANGE: Type = Type(Inner::NummultiRange); + + /// TSMULTIRANGE - multirange of timestamps without time zone + pub const TSMULTI_RANGE: Type = Type(Inner::TsmultiRange); + + /// TSTZMULTIRANGE - multirange of timestamps with time zone + pub const TSTZMULTI_RANGE: Type = Type(Inner::TstzmultiRange); + + /// DATEMULTIRANGE - multirange of dates + pub const DATEMULTI_RANGE: Type = Type(Inner::DatemultiRange); + + /// INT8MULTIRANGE - multirange of bigints + pub const INT8MULTI_RANGE: Type = Type(Inner::Int8multiRange); + + /// ANYMULTIRANGE - pseudo-type representing a polymorphic base type that is a multirange + pub const ANYMULTI_RANGE: Type = Type(Inner::AnymultiRange); + + /// ANYCOMPATIBLEMULTIRANGE - pseudo-type representing a multirange over a polymorphic common type + pub const ANYCOMPATIBLEMULTI_RANGE: Type = Type(Inner::AnycompatiblemultiRange); + + /// 
PG_BRIN_BLOOM_SUMMARY - BRIN bloom summary + pub const PG_BRIN_BLOOM_SUMMARY: Type = Type(Inner::PgBrinBloomSummary); + + /// PG_BRIN_MINMAX_MULTI_SUMMARY - BRIN minmax-multi summary + pub const PG_BRIN_MINMAX_MULTI_SUMMARY: Type = Type(Inner::PgBrinMinmaxMultiSummary); + /// PG_MCV_LIST - multivariate MCV list pub const PG_MCV_LIST: Type = Type(Inner::PgMcvList); + + /// PG_SNAPSHOT - snapshot + pub const PG_SNAPSHOT: Type = Type(Inner::PgSnapshot); + + /// PG_SNAPSHOT[] + pub const PG_SNAPSHOT_ARRAY: Type = Type(Inner::PgSnapshotArray); + + /// XID8 - full transaction id + pub const XID8: Type = Type(Inner::Xid8); + + /// ANYCOMPATIBLE - pseudo-type representing a polymorphic common type + pub const ANYCOMPATIBLE: Type = Type(Inner::Anycompatible); + + /// ANYCOMPATIBLEARRAY - pseudo-type representing an array of polymorphic common type elements + pub const ANYCOMPATIBLEARRAY: Type = Type(Inner::Anycompatiblearray); + + /// ANYCOMPATIBLENONARRAY - pseudo-type representing a polymorphic common type that is not an array + pub const ANYCOMPATIBLENONARRAY: Type = Type(Inner::Anycompatiblenonarray); + + /// ANYCOMPATIBLERANGE - pseudo-type representing a range over a polymorphic common type + pub const ANYCOMPATIBLE_RANGE: Type = Type(Inner::AnycompatibleRange); + + /// INT4MULTIRANGE[] + pub const INT4MULTI_RANGE_ARRAY: Type = Type(Inner::Int4multiRangeArray); + + /// NUMMULTIRANGE[] + pub const NUMMULTI_RANGE_ARRAY: Type = Type(Inner::NummultiRangeArray); + + /// TSMULTIRANGE[] + pub const TSMULTI_RANGE_ARRAY: Type = Type(Inner::TsmultiRangeArray); + + /// TSTZMULTIRANGE[] + pub const TSTZMULTI_RANGE_ARRAY: Type = Type(Inner::TstzmultiRangeArray); + + /// DATEMULTIRANGE[] + pub const DATEMULTI_RANGE_ARRAY: Type = Type(Inner::DatemultiRangeArray); + + /// INT8MULTIRANGE[] + pub const INT8MULTI_RANGE_ARRAY: Type = Type(Inner::Int8multiRangeArray); } diff --git a/postgres-types/src/uuid_1.rs b/postgres-types/src/uuid_1.rs new file mode 100644 index 000000000..d9969f60c 
--- /dev/null +++ b/postgres-types/src/uuid_1.rs @@ -0,0 +1,25 @@ +use bytes::BytesMut; +use postgres_protocol::types; +use std::error::Error; +use uuid_1::Uuid; + +use crate::{FromSql, IsNull, ToSql, Type}; + +impl<'a> FromSql<'a> for Uuid { + fn from_sql(_: &Type, raw: &[u8]) -> Result> { + let bytes = types::uuid_from_sql(raw)?; + Ok(Uuid::from_bytes(bytes)) + } + + accepts!(UUID); +} + +impl ToSql for Uuid { + fn to_sql(&self, _: &Type, w: &mut BytesMut) -> Result> { + types::uuid_to_sql(*self.as_bytes(), w); + Ok(IsNull::No) + } + + accepts!(UUID); + to_sql_checked!(); +} diff --git a/postgres/CHANGELOG.md b/postgres/CHANGELOG.md index 550f0c706..771e2e779 100644 --- a/postgres/CHANGELOG.md +++ b/postgres/CHANGELOG.md @@ -1,5 +1,137 @@ # Change Log +## Unreleased + +## v0.19.10 - 2025-02-02 + +### Added + +* Added support for direct TLS negotiation. +* Added support for `cidr` 0.3 via the `with-cidr-0_3` feature. + +## v0.19.9 - 2024-09-15 + +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. + +## v0.19.8 - 2024-07-21 + +### Added + +* Added `{Client, Transaction, GenericClient}::query_typed`. + +## v0.19.7 - 2023-08-25 + +## Fixed + +* Defered default username lookup to avoid regressing `Config` behavior. + +## v0.19.6 - 2023-08-19 + +### Added + +* Added support for the `hostaddr` config option to bypass DNS lookups. +* Added support for the `load_balance_hosts` config option to randomize connection ordering. +* The `user` config option now defaults to the executing process's user. + +## v0.19.5 - 2023-03-27 + +### Added + +* Added `keepalives_interval` and `keepalives_retries` config options. +* Added the `tcp_user_timeout` config option. +* Added `RowIter::rows_affected`. + +### Changed + +* Passing an incorrect number of parameters to a query method now returns an error instead of panicking. + +## v0.19.4 - 2022-08-21 + +### Added + +* Added `ToSql` and `FromSql` implementations for `[u8; N]` via the `array-impls` feature. 
+* Added support for `smol_str` 0.1 via the `with-smol_str-01` feature. +* Added `ToSql::encode_format` to support text encodings of parameters. + +## v0.19.3 - 2022-04-30 + +### Added + +* Added support for `uuid` 1.0 via the `with-uuid-1` feature. + +## v0.19.2 - 2021-09-29 + +### Added + +* Added `SimpleQueryRow::columns`. +* Added support for `eui48` 1.0 via the `with-eui48-1` feature. +* Added `FromSql` and `ToSql` implementations for arrays via the `array-impls` feature. +* Added support for `time` 0.3 via the `with-time-0_3` feature. + +## v0.19.1 - 2021-04-03 + +### Added + +* Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. +* Added `Client::clear_type_cache`. + +## v0.19.0 - 2020-12-25 + +### Changed + +* Upgraded to `tokio-postgres` 0.7. +* Methods taking iterators of `ToSql` values can now take both `&dyn ToSql` and `T: ToSql` values. + +### Added + +* Added `Client::is_valid` which can be used to check that the connection is still alive with a + timeout. + +## v0.18.1 - 2020-10-19 + +### Fixed + +* Restored the `Send` implementation for `Client`. + +## v0.18.0 - 2020-10-17 + +### Changed + +* Upgraded to `tokio-postgres` 0.6. + +### Added + +* Added `Config::notice_callback`, which can be used to provide a custom callback for notices. + +### Fixed + +* Fixed client shutdown to explicitly terminate the database session. + +## v0.17.5 - 2020-07-19 + +### Fixed + +* Fixed transactions to roll back immediately on drop. + +## v0.17.4 - 2020-07-03 + +### Added + +* Added support for `geo-types` 0.6. + +## v0.17.3 - 2020-05-01 + +### Fixed + +* Errors sent by the server will now be returned from `Client` methods rather than just being logged. + +### Added + +* Added `Transaction::savepoint`, which can be used to create a savepoint with a custom name. +* Added `Client::notifications`, which returns an interface to the notifications sent by the server. 
+ ## v0.17.2 - 2020-03-05 ### Added @@ -26,7 +158,7 @@ * `Client::query_raw` now returns a named type. * `Client::copy_in` and `Client::copy_out` no longer take query parameters as PostgreSQL doesn't support them in COPY queries. - + ### Removed * Removed support for `uuid` 0.7. diff --git a/postgres/Cargo.toml b/postgres/Cargo.toml index d0cf11004..456bfb808 100644 --- a/postgres/Cargo.toml +++ b/postgres/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "postgres" -version = "0.17.2" +version = "0.19.10" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "A native, synchronous PostgreSQL client" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" @@ -21,22 +21,31 @@ all-features = true circle-ci = { repository = "sfackler/rust-postgres" } [features] +array-impls = ["tokio-postgres/array-impls"] with-bit-vec-0_6 = ["tokio-postgres/with-bit-vec-0_6"] with-chrono-0_4 = ["tokio-postgres/with-chrono-0_4"] +with-cidr-0_2 = ["tokio-postgres/with-cidr-0_2"] +with-cidr-0_3 = ["tokio-postgres/with-cidr-0_3"] with-eui48-0_4 = ["tokio-postgres/with-eui48-0_4"] -with-geo-types-0_4 = ["tokio-postgres/with-geo-types-0_4"] +with-eui48-1 = ["tokio-postgres/with-eui48-1"] +with-geo-types-0_6 = ["tokio-postgres/with-geo-types-0_6"] +with-geo-types-0_7 = ["tokio-postgres/with-geo-types-0_7"] +with-jiff-0_1 = ["tokio-postgres/with-jiff-0_1"] +with-jiff-0_2 = ["tokio-postgres/with-jiff-0_2"] with-serde_json-1 = ["tokio-postgres/with-serde_json-1"] +with-smol_str-01 = ["tokio-postgres/with-smol_str-01"] with-uuid-0_8 = ["tokio-postgres/with-uuid-0_8"] +with-uuid-1 = ["tokio-postgres/with-uuid-1"] with-time-0_2 = ["tokio-postgres/with-time-0_2"] +with-time-0_3 = ["tokio-postgres/with-time-0_3"] [dependencies] -bytes = "0.5" +bytes = "1.0" fallible-iterator = "0.2" -futures = "0.3" -tokio-postgres = { version = "0.5.3", path = "../tokio-postgres" } - -tokio = { version = "0.2", features = 
["rt-core", "time"] } +futures-util = { version = "0.3.14", features = ["sink"] } log = "0.4" +tokio-postgres = { version = "0.7.13", path = "../tokio-postgres" } +tokio = { version = "1.0", features = ["rt", "time"] } [dev-dependencies] -criterion = "0.3" +criterion = "0.6" diff --git a/postgres/LICENSE-APACHE b/postgres/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/postgres/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/postgres/LICENSE-APACHE b/postgres/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/postgres/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/postgres/LICENSE-MIT b/postgres/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/postgres/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/postgres/LICENSE-MIT b/postgres/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/postgres/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/postgres/src/binary_copy.rs b/postgres/src/binary_copy.rs index 259347195..1c4eb7d3b 100644 --- a/postgres/src/binary_copy.rs +++ b/postgres/src/binary_copy.rs @@ -1,10 +1,10 @@ //! Utilities for working with the PostgreSQL binary copy format. use crate::connection::ConnectionRef; -use crate::types::{ToSql, Type}; +use crate::types::{BorrowToSql, ToSql, Type}; use crate::{CopyInWriter, CopyOutReader, Error}; use fallible_iterator::FallibleIterator; -use futures::StreamExt; +use futures_util::StreamExt; use std::pin::Pin; #[doc(inline)] pub use tokio_postgres::binary_copy::BinaryCopyOutRow; @@ -46,9 +46,10 @@ impl<'a> BinaryCopyInWriter<'a> { /// # Panics /// /// Panics if the number of values provided does not match the number expected. 
- pub fn write_raw<'b, I>(&mut self, values: I) -> Result<(), Error> + pub fn write_raw(&mut self, values: I) -> Result<(), Error> where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.connection diff --git a/postgres/src/cancel_token.rs b/postgres/src/cancel_token.rs index f140e60e2..be24edcc8 100644 --- a/postgres/src/cancel_token.rs +++ b/postgres/src/cancel_token.rs @@ -26,9 +26,8 @@ impl CancelToken { where T: MakeTlsConnect, { - runtime::Builder::new() + runtime::Builder::new_current_thread() .enable_all() - .basic_scheduler() .build() .unwrap() // FIXME don't unwrap .block_on(self.0.cancel_query(tls)) diff --git a/postgres/src/client.rs b/postgres/src/client.rs index a0c61b33d..42ce6dec9 100644 --- a/postgres/src/client.rs +++ b/postgres/src/client.rs @@ -3,8 +3,10 @@ use crate::{ CancelToken, Config, CopyInWriter, CopyOutReader, Notifications, RowIter, Statement, ToStatement, Transaction, TransactionBuilder, }; +use std::task::Poll; +use std::time::Duration; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; -use tokio_postgres::types::{ToSql, Type}; +use tokio_postgres::types::{BorrowToSql, ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage, Socket}; /// A synchronous PostgreSQL client. @@ -13,6 +15,12 @@ pub struct Client { client: tokio_postgres::Client, } +impl Drop for Client { + fn drop(&mut self) { + let _ = self.close_inner(); + } +} + impl Client { pub(crate) fn new(connection: Connection, client: tokio_postgres::Client) -> Client { Client { connection, client } @@ -49,10 +57,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. 
- /// /// # Example /// /// ```no_run @@ -88,10 +92,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Examples /// /// ```no_run @@ -126,10 +126,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Examples /// /// ```no_run @@ -164,10 +160,6 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Examples /// /// ```no_run @@ -205,10 +197,6 @@ impl Client { /// It takes an iterator of parameters rather than a slice, and returns an iterator of rows rather than collecting /// them into an array. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// # Examples /// /// ```no_run @@ -220,7 +208,7 @@ impl Client { /// let mut client = Client::connect("host=localhost user=postgres", NoTls)?; /// /// let baz = true; - /// let mut it = client.query_raw("SELECT foo FROM bar WHERE baz = $1", iter::once(&baz as _))?; + /// let mut it = client.query_raw("SELECT foo FROM bar WHERE baz = $1", iter::once(baz))?; /// /// while let Some(row) = it.next()? { /// let foo: i32 = row.get("foo"); @@ -246,7 +234,7 @@ impl Client { /// ]; /// let mut it = client.query_raw( /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", - /// params.iter().map(|p| p as &dyn ToSql), + /// params, /// )?; /// /// while let Some(row) = it.next()? 
{ @@ -256,10 +244,11 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + pub fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let stream = self @@ -268,6 +257,71 @@ impl Client { Ok(RowIter::new(self.connection.as_ref(), stream)) } + /// Like `query`, but requires the types of query parameters to be explicitly specified. + /// + /// Compared to `query`, this method allows performing queries without three round trips (for + /// prepare, execute, and close) by requiring the caller to specify parameter values along with + /// their Postgres type. Thus, this is suitable in environments where prepared statements aren't + /// supported (such as Cloudflare Workers with Hyperdrive). + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the + /// parameter of the list provided, 1-indexed. + pub fn query_typed( + &mut self, + query: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.connection + .block_on(self.client.query_typed(query, params)) + } + + /// The maximally flexible version of [`query_typed`]. + /// + /// Compared to `query`, this method allows performing queries without three round trips (for + /// prepare, execute, and close) by requiring the caller to specify parameter values along with + /// their Postgres type. Thus, this is suitable in environments where prepared statements aren't + /// supported (such as Cloudflare Workers with Hyperdrive). + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the + /// parameter of the list provided, 1-indexed. 
+ /// + /// [`query_typed`]: #method.query_typed + /// + /// # Examples + /// ```no_run + /// # use postgres::{Client, NoTls}; + /// use postgres::types::{ToSql, Type}; + /// use fallible_iterator::FallibleIterator; + /// # fn main() -> Result<(), postgres::Error> { + /// # let mut client = Client::connect("host=localhost user=postgres", NoTls)?; + /// + /// let params: Vec<(String, Type)> = vec![ + /// ("first param".into(), Type::TEXT), + /// ("second param".into(), Type::TEXT), + /// ]; + /// let mut it = client.query_typed_raw( + /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", + /// params, + /// )?; + /// + /// while let Some(row) = it.next()? { + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// } + /// # Ok(()) + /// # } + /// ``` + pub fn query_typed_raw(&mut self, query: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator, + { + let stream = self + .connection + .block_on(self.client.query_typed_raw(query, params))?; + Ok(RowIter::new(self.connection.as_ref(), stream)) + } + /// Creates a new prepared statement. /// /// Prepared statements can be executed repeatedly, and may contain query parameters (indicated by `$1`, `$2`, etc), @@ -398,13 +452,27 @@ impl Client { /// /// # Warning /// - /// Prepared statements should be use for any query which contains user-specified data, as they provided the - /// functionality to safely imbed that data in the request. Do not form statements via string concatenation and pass + /// Prepared statements should be used for any query which contains user-specified data, as they provided the + /// functionality to safely embed that data in the request. Do not form statements via string concatenation and pass /// them to this method! pub fn simple_query(&mut self, query: &str) -> Result, Error> { self.connection.block_on(self.client.simple_query(query)) } + /// Validates the connection by performing a simple no-op query. 
+ /// + /// If the specified timeout is reached before the backend responds, an error will be returned. + pub fn is_valid(&mut self, timeout: Duration) -> Result<(), Error> { + let inner_client = &self.client; + self.connection.block_on(async { + let trivial_query = inner_client.simple_query(""); + tokio::time::timeout(timeout, trivial_query) + .await + .map_err(|_| Error::__private_api_timeout())? + .map(|_| ()) + }) + } + /// Executes a sequence of SQL statements using the simple query protocol. /// /// Statements should be separated by semicolons. If an error occurs, execution of the sequence will stop at that @@ -478,8 +546,8 @@ impl Client { Notifications::new(self.connection.as_ref()) } - /// Constructs a cancellation token that can later be used to request - /// cancellation of a query running on this connection. + /// Constructs a cancellation token that can later be used to request cancellation of a query running on this + /// connection. /// /// # Examples /// @@ -518,10 +586,39 @@ impl Client { CancelToken::new(self.client.cancel_token()) } + /// Clears the client's type information cache. + /// + /// When user-defined types are used in a query, the client loads their definitions from the database and caches + /// them for the lifetime of the client. If those definitions are changed in the database, this method can be used + /// to flush the local cache and allow the new, updated definitions to be loaded. + pub fn clear_type_cache(&self) { + self.client.clear_type_cache(); + } + /// Determines if the client's connection has already closed. /// /// If this returns `true`, the client is no longer usable. pub fn is_closed(&self) -> bool { self.client.is_closed() } + + /// Closes the client's connection to the server. + /// + /// This is equivalent to `Client`'s `Drop` implementation, except that it returns any error encountered to the + /// caller. 
+ pub fn close(mut self) -> Result<(), Error> { + self.close_inner() + } + + fn close_inner(&mut self) -> Result<(), Error> { + self.client.__private_api_close(); + + self.connection.poll_block_on(|_, _, done| { + if done { + Poll::Ready(Ok(())) + } else { + Poll::Pending + } + }) + } } diff --git a/postgres/src/config.rs b/postgres/src/config.rs index b344efdd2..c7f932ba7 100644 --- a/postgres/src/config.rs +++ b/postgres/src/config.rs @@ -1,16 +1,22 @@ //! Connection configuration. -//! -//! Requires the `runtime` Cargo feature (enabled by default). + +#![allow(clippy::doc_overindented_list_items)] use crate::connection::Connection; use crate::Client; +use log::info; use std::fmt; +use std::net::IpAddr; use std::path::Path; use std::str::FromStr; +use std::sync::Arc; use std::time::Duration; use tokio::runtime; #[doc(inline)] -pub use tokio_postgres::config::{ChannelBinding, Host, SslMode, TargetSessionAttrs}; +pub use tokio_postgres::config::{ + ChannelBinding, Host, LoadBalanceHosts, SslMode, SslNegotiation, TargetSessionAttrs, +}; +use tokio_postgres::error::DbError; use tokio_postgres::tls::{MakeTlsConnect, TlsConnect}; use tokio_postgres::{Error, Socket}; @@ -25,7 +31,7 @@ use tokio_postgres::{Error, Socket}; /// /// ## Keys /// -/// * `user` - The username to authenticate with. Required. +/// * `user` - The username to authenticate with. Defaults to the user executing this process. /// * `password` - The password to authenticate with. /// * `dbname` - The name of the database to connect to. Defaults to the username. /// * `options` - Command line options used to configure the server. @@ -36,18 +42,50 @@ use tokio_postgres::{Error, Socket}; /// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting /// with the `connect` method. +/// * `sslnegotiation` - TLS negotiation method. 
If set to `direct`, the client will perform direct TLS handshake, this only works for PostgreSQL 17 and newer. +/// Note that you will need to setup ALPN of TLS client configuration to `postgresql` when using direct TLS. +/// If set to `postgres`, the default value, it follows original postgres wire protocol to perform the negotiation. +/// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, +/// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. +/// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, +/// or if host specifies an IP address, that value will be used directly. +/// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications +/// with time constraints. However, a host name is required for TLS certificate verification. +/// Specifically: +/// * If `hostaddr` is specified without `host`, the value for `hostaddr` gives the server network address. +/// The connection attempt will fail if the authentication method requires a host name; +/// * If `host` is specified without `hostaddr`, a host name lookup occurs; +/// * If both `host` and `hostaddr` are specified, the value for `hostaddr` gives the server network address. +/// The value for `host` is ignored unless the authentication method requires it, +/// in which case it will be used as the host name. /// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be /// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if /// omitted or the empty string. /// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. Note that hostnames /// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. 
+/// * `tcp_user_timeout` - The time limit that transmitted data may remain unacknowledged before a connection is forcibly closed. +/// This is ignored for Unix domain socket connections. It is only supported on systems where TCP_USER_TIMEOUT is available +/// and will default to the system default if omitted or set to 0; on other systems, it has no effect. /// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. /// This option is ignored when connecting with Unix sockets. Defaults to on. /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. /// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. +/// * `keepalives_interval` - The time interval between TCP keepalive probes. +/// This option is ignored when connecting with Unix sockets. +/// * `keepalives_retries` - The maximum number of TCP keepalive probes that will be sent before dropping a connection. +/// This option is ignored when connecting with Unix sockets. /// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that /// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server /// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. +/// * `channel_binding` - Controls usage of channel binding in the authentication process. If set to `disable`, channel +/// binding will not be used. If set to `prefer`, channel binding will be used if available, but not used otherwise. +/// If set to `require`, the authentication process will fail if channel binding is not used. Defaults to `prefer`. +/// * `load_balance_hosts` - Controls the order in which the client tries to connect to the available hosts and +/// addresses. Once a connection attempt is successful no other hosts and addresses will be tried. 
This parameter +/// is typically used in combination with multiple host names or a DNS record that returns multiple IPs. If set to +/// `disable`, hosts and addresses will be tried in the order provided. If set to `random`, hosts will be tried +/// in a random order, and the IP addresses resolved from a hostname will also be tried in a random order. Defaults +/// to `disable`. /// /// ## Examples /// @@ -60,13 +98,17 @@ use tokio_postgres::{Error, Socket}; /// ``` /// /// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 hostaddr=127.0.0.1,127.0.0.2,127.0.0.3 user=postgres target_session_attrs=read-write +/// ``` +/// +/// ```not_rust /// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write /// ``` /// /// # Url /// /// This format resembles a URL with a scheme of either `postgres://` or `postgresql://`. All components are optional, -/// and the format accept query parameters for all of the key-value pairs described in the section above. Multiple +/// and the format accepts query parameters for all of the key-value pairs described in the section above. Multiple /// host/port pairs can be comma-separated. Unix socket paths in the host section of the URL should be percent-encoded, /// as the path component of the URL specifies the database name. /// @@ -90,6 +132,7 @@ use tokio_postgres::{Error, Socket}; #[derive(Clone)] pub struct Config { config: tokio_postgres::Config, + notice_callback: Arc, } impl fmt::Debug for Config { @@ -109,14 +152,12 @@ impl Default for Config { impl Config { /// Creates a new configuration. pub fn new() -> Config { - Config { - config: tokio_postgres::Config::new(), - } + tokio_postgres::Config::new().into() } /// Sets the user to authenticate with. /// - /// Required. + /// If the user is not set, then this defaults to the user executing this process. 
pub fn user(&mut self, user: &str) -> &mut Config { self.config.user(user); self @@ -194,10 +235,22 @@ impl Config { self.config.get_ssl_mode() } + /// Sets the SSL negotiation method + pub fn ssl_negotiation(&mut self, ssl_negotiation: SslNegotiation) -> &mut Config { + self.config.ssl_negotiation(ssl_negotiation); + self + } + + /// Gets the SSL negotiation method + pub fn get_ssl_negotiation(&self) -> SslNegotiation { + self.config.get_ssl_negotiation() + } + /// Adds a host to the configuration. /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. + /// There must be either no hosts, or the same number of hosts as hostaddrs. pub fn host(&mut self, host: &str) -> &mut Config { self.config.host(host); self @@ -208,6 +261,11 @@ impl Config { self.config.get_hosts() } + /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. + pub fn get_hostaddrs(&self) -> &[IpAddr] { + self.config.get_hostaddrs() + } + /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. @@ -220,6 +278,15 @@ impl Config { self } + /// Adds a hostaddr to the configuration. + /// + /// Multiple hostaddrs can be specified by calling this method multiple times, and each will be tried in order. + /// There must be either no hostaddrs, or the same number of hostaddrs as hosts. + pub fn hostaddr(&mut self, hostaddr: IpAddr) -> &mut Config { + self.config.hostaddr(hostaddr); + self + } + /// Adds a port to the configuration. /// /// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which @@ -250,6 +317,22 @@ impl Config { self.config.get_connect_timeout() } + /// Sets the TCP user timeout. + /// + /// This is ignored for Unix domain socket connections. 
It is only supported on systems where + /// TCP_USER_TIMEOUT is available and will default to the system default if omitted or set to 0; + /// on other systems, it has no effect. + pub fn tcp_user_timeout(&mut self, tcp_user_timeout: Duration) -> &mut Config { + self.config.tcp_user_timeout(tcp_user_timeout); + self + } + + /// Gets the TCP user timeout, if one has been set with the + /// `user_timeout` method. + pub fn get_tcp_user_timeout(&self) -> Option<&Duration> { + self.config.get_tcp_user_timeout() + } + /// Controls the use of TCP keepalive. /// /// This is ignored for Unix domain socket connections. Defaults to `true`. @@ -277,6 +360,33 @@ impl Config { self.config.get_keepalives_idle() } + /// Sets the time interval between TCP keepalive probes. + /// On Windows, this sets the value of the tcp_keepalive struct’s keepaliveinterval field. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + pub fn keepalives_interval(&mut self, keepalives_interval: Duration) -> &mut Config { + self.config.keepalives_interval(keepalives_interval); + self + } + + /// Gets the time interval between TCP keepalive probes. + pub fn get_keepalives_interval(&self) -> Option { + self.config.get_keepalives_interval() + } + + /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + pub fn keepalives_retries(&mut self, keepalives_retries: u32) -> &mut Config { + self.config.keepalives_retries(keepalives_retries); + self + } + + /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. + pub fn get_keepalives_retries(&self) -> Option { + self.config.get_keepalives_retries() + } + /// Sets the requirements of the session. 
/// /// This can be used to connect to the primary server in a clustered database rather than one of the read-only @@ -307,6 +417,38 @@ impl Config { self.config.get_channel_binding() } + /// Sets the host load balancing behavior. + /// + /// Defaults to `disable`. + pub fn load_balance_hosts(&mut self, load_balance_hosts: LoadBalanceHosts) -> &mut Config { + self.config.load_balance_hosts(load_balance_hosts); + self + } + + /// Gets the host load balancing behavior. + pub fn get_load_balance_hosts(&self) -> LoadBalanceHosts { + self.config.get_load_balance_hosts() + } + + /// Sets the notice callback. + /// + /// This callback will be invoked with the contents of every + /// [`AsyncMessage::Notice`] that is received by the connection. Notices use + /// the same structure as errors, but they are not "errors" per-se. + /// + /// Notices are distinct from notifications, which are instead accessible + /// via the [`Notifications`] API. + /// + /// [`AsyncMessage::Notice`]: tokio_postgres::AsyncMessage::Notice + /// [`Notifications`]: crate::Notifications + pub fn notice_callback(&mut self, f: F) -> &mut Config + where + F: Fn(DbError) + Send + Sync + 'static, + { + self.notice_callback = Arc::new(f); + self + } + /// Opens a connection to a PostgreSQL database. 
pub fn connect(&self, tls: T) -> Result where @@ -315,15 +457,14 @@ impl Config { T::Stream: Send, >::Future: Send, { - let mut runtime = runtime::Builder::new() + let runtime = runtime::Builder::new_current_thread() .enable_all() - .basic_scheduler() .build() .unwrap(); // FIXME don't unwrap let (client, connection) = runtime.block_on(self.config.connect(tls))?; - let connection = Connection::new(runtime, connection); + let connection = Connection::new(runtime, connection, self.notice_callback.clone()); Ok(Client::new(connection, client)) } } @@ -338,6 +479,11 @@ impl FromStr for Config { impl From for Config { fn from(config: tokio_postgres::Config) -> Config { - Config { config } + Config { + config, + notice_callback: Arc::new(|notice| { + info!("{}: {}", notice.severity(), notice.message()) + }), + } } } diff --git a/postgres/src/connection.rs b/postgres/src/connection.rs index acea5eca7..b91c16555 100644 --- a/postgres/src/connection.rs +++ b/postgres/src/connection.rs @@ -1,24 +1,29 @@ use crate::{Error, Notification}; -use futures::future; -use futures::{pin_mut, Stream}; -use log::info; +use futures_util::{future, pin_mut, Stream}; use std::collections::VecDeque; use std::future::Future; use std::ops::{Deref, DerefMut}; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::runtime::Runtime; +use tokio_postgres::error::DbError; use tokio_postgres::AsyncMessage; pub struct Connection { runtime: Runtime, connection: Pin> + Send>>, notifications: VecDeque, + notice_callback: Arc, } impl Connection { - pub fn new(runtime: Runtime, connection: tokio_postgres::Connection) -> Connection + pub fn new( + runtime: Runtime, + connection: tokio_postgres::Connection, + notice_callback: Arc, + ) -> Connection where S: AsyncRead + AsyncWrite + Unpin + 'static + Send, T: AsyncRead + AsyncWrite + Unpin + 'static + Send, @@ -27,6 +32,7 @@ impl Connection { runtime, connection: Box::pin(ConnectionStream { 
connection }), notifications: VecDeque::new(), + notice_callback, } } @@ -38,7 +44,8 @@ impl Connection { where F: FnOnce() -> T, { - self.runtime.enter(f) + let _guard = self.runtime.enter(); + f() } pub fn block_on(&mut self, future: F) -> Result @@ -55,6 +62,7 @@ impl Connection { { let connection = &mut self.connection; let notifications = &mut self.notifications; + let notice_callback = &mut self.notice_callback; self.runtime.block_on({ future::poll_fn(|cx| { let done = loop { @@ -63,7 +71,7 @@ impl Connection { notifications.push_back(notification); } Poll::Ready(Some(Ok(AsyncMessage::Notice(notice)))) => { - info!("{}: {}", notice.severity(), notice.message()); + notice_callback(notice) } Poll::Ready(Some(Ok(_))) => {} Poll::Ready(Some(Err(e))) => return Poll::Ready(Err(e)), diff --git a/postgres/src/copy_in_writer.rs b/postgres/src/copy_in_writer.rs index c996ed857..83c642c73 100644 --- a/postgres/src/copy_in_writer.rs +++ b/postgres/src/copy_in_writer.rs @@ -1,7 +1,7 @@ use crate::connection::ConnectionRef; use crate::lazy_pin::LazyPin; use bytes::{Bytes, BytesMut}; -use futures::SinkExt; +use futures_util::SinkExt; use std::io; use std::io::Write; use tokio_postgres::{CopyInSink, Error}; @@ -53,7 +53,6 @@ impl Write for CopyInWriter<'_> { } fn flush(&mut self) -> io::Result<()> { - self.flush_inner() - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + self.flush_inner().map_err(io::Error::other) } } diff --git a/postgres/src/copy_out_reader.rs b/postgres/src/copy_out_reader.rs index a205d1a1a..b683ddeec 100644 --- a/postgres/src/copy_out_reader.rs +++ b/postgres/src/copy_out_reader.rs @@ -1,7 +1,7 @@ use crate::connection::ConnectionRef; use crate::lazy_pin::LazyPin; use bytes::{Buf, Bytes}; -use futures::StreamExt; +use futures_util::StreamExt; use std::io::{self, BufRead, Read}; use tokio_postgres::CopyOutStream; @@ -34,19 +34,19 @@ impl Read for CopyOutReader<'_> { impl BufRead for CopyOutReader<'_> { fn fill_buf(&mut self) -> io::Result<&[u8]> { 
- if !self.cur.has_remaining() { + while !self.cur.has_remaining() { let mut stream = self.stream.pinned(); match self .connection - .block_on({ async { stream.next().await.transpose() } }) + .block_on(async { stream.next().await.transpose() }) { Ok(Some(cur)) => self.cur = cur, - Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), - Ok(None) => {} + Err(e) => return Err(io::Error::other(e)), + Ok(None) => break, }; } - Ok(self.cur.bytes()) + Ok(&self.cur) } fn consume(&mut self, amt: usize) { diff --git a/postgres/src/generic_client.rs b/postgres/src/generic_client.rs index 42a466df6..7b534867c 100644 --- a/postgres/src/generic_client.rs +++ b/postgres/src/generic_client.rs @@ -1,4 +1,4 @@ -use crate::types::{ToSql, Type}; +use crate::types::{BorrowToSql, ToSql, Type}; use crate::{ Client, CopyInWriter, CopyOutReader, Error, Row, RowIter, SimpleQueryMessage, Statement, ToStatement, Transaction, @@ -37,12 +37,26 @@ pub trait GenericClient: private::Sealed { T: ?Sized + ToStatement; /// Like `Client::query_raw`. - fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator; + /// Like [`Client::query_typed`] + fn query_typed( + &mut self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error>; + + /// Like [`Client::query_typed_raw`] + fn query_typed_raw(&mut self, statement: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator + Sync + Send; + /// Like `Client::prepare`. 
fn prepare(&mut self, query: &str) -> Result; @@ -104,15 +118,32 @@ impl GenericClient for Client { self.query_opt(query, params) } - fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.query_raw(query, params) } + fn query_typed( + &mut self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_typed(statement, params) + } + + fn query_typed_raw(&mut self, statement: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator + Sync + Send, + { + self.query_typed_raw(statement, params) + } + fn prepare(&mut self, query: &str) -> Result { self.prepare(query) } @@ -183,15 +214,32 @@ impl GenericClient for Transaction<'_> { self.query_opt(query, params) } - fn query_raw<'a, T, I>(&mut self, query: &T, params: I) -> Result, Error> + fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.query_raw(query, params) } + fn query_typed( + &mut self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_typed(statement, params) + } + + fn query_typed_raw(&mut self, statement: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator + Sync + Send, + { + self.query_typed_raw(statement, params) + } + fn prepare(&mut self, query: &str) -> Result { self.prepare(query) } diff --git a/postgres/src/lib.rs b/postgres/src/lib.rs index 80380a87e..ddf1609ad 100644 --- a/postgres/src/lib.rs +++ b/postgres/src/lib.rs @@ -55,12 +55,15 @@ //! | ------- | ----------- | ------------------ | ------- | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. 
| [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | -//! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | -//! | `with-geo-types-0_4` | Enable support for the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types) 0.4 | no | -//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [eui48](https://crates.io/crates/serde_json) 1.0 | no | +//! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. This is deprecated and will be removed. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | +//! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | +//! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | +//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | -//! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | -#![doc(html_root_url = "https://docs.rs/postgres/0.17")] +//! | `with-uuid-1` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 1.0 | no | +//! | `with-time-0_2` | Enable support for the 0.2 version of the `time` crate. | [time](https://crates.io/crates/time/0.2.0) 0.2 | no | +//! | `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. 
| [time](https://crates.io/crates/time/0.3.0) 0.3 | no | #![warn(clippy::all, rust_2018_idioms, missing_docs)] pub use fallible_iterator; diff --git a/postgres/src/notifications.rs b/postgres/src/notifications.rs index e8c681548..0c040dedf 100644 --- a/postgres/src/notifications.rs +++ b/postgres/src/notifications.rs @@ -3,10 +3,11 @@ use crate::connection::ConnectionRef; use crate::{Error, Notification}; use fallible_iterator::FallibleIterator; -use futures::{ready, FutureExt}; +use futures_util::{ready, FutureExt}; +use std::pin::Pin; use std::task::Poll; use std::time::Duration; -use tokio::time::{self, Delay, Instant}; +use tokio::time::{self, Instant, Sleep}; /// Notifications from a PostgreSQL backend. pub struct Notifications<'a> { @@ -64,7 +65,7 @@ impl<'a> Notifications<'a> { /// This iterator may start returning `Some` after previously returning `None` if more notifications are received. pub fn timeout_iter(&mut self, timeout: Duration) -> TimeoutIter<'_> { TimeoutIter { - delay: self.connection.enter(|| time::delay_for(timeout)), + delay: Box::pin(self.connection.enter(|| time::sleep(timeout))), timeout, connection: self.connection.as_ref(), } @@ -76,7 +77,7 @@ pub struct Iter<'a> { connection: ConnectionRef<'a>, } -impl<'a> FallibleIterator for Iter<'a> { +impl FallibleIterator for Iter<'_> { type Item = Notification; type Error = Error; @@ -99,7 +100,7 @@ pub struct BlockingIter<'a> { connection: ConnectionRef<'a>, } -impl<'a> FallibleIterator for BlockingIter<'a> { +impl FallibleIterator for BlockingIter<'_> { type Item = Notification; type Error = Error; @@ -124,17 +125,17 @@ impl<'a> FallibleIterator for BlockingIter<'a> { /// A time-limited blocking iterator over pending notifications. 
pub struct TimeoutIter<'a> { connection: ConnectionRef<'a>, - delay: Delay, + delay: Pin>, timeout: Duration, } -impl<'a> FallibleIterator for TimeoutIter<'a> { +impl FallibleIterator for TimeoutIter<'_> { type Item = Notification; type Error = Error; fn next(&mut self) -> Result, Self::Error> { if let Some(notification) = self.connection.notifications_mut().pop_front() { - self.delay.reset(Instant::now() + self.timeout); + self.delay.as_mut().reset(Instant::now() + self.timeout); return Ok(Some(notification)); } @@ -143,7 +144,7 @@ impl<'a> FallibleIterator for TimeoutIter<'a> { self.connection.poll_block_on(|cx, notifications, done| { match notifications.pop_front() { Some(notification) => { - delay.reset(Instant::now() + timeout); + delay.as_mut().reset(Instant::now() + timeout); return Poll::Ready(Ok(Some(notification))); } None if done => return Poll::Ready(Ok(None)), diff --git a/postgres/src/row_iter.rs b/postgres/src/row_iter.rs index 3cd41b900..221fdfc68 100644 --- a/postgres/src/row_iter.rs +++ b/postgres/src/row_iter.rs @@ -1,6 +1,6 @@ use crate::connection::ConnectionRef; use fallible_iterator::FallibleIterator; -use futures::StreamExt; +use futures_util::StreamExt; use std::pin::Pin; use tokio_postgres::{Error, Row, RowStream}; @@ -17,6 +17,13 @@ impl<'a> RowIter<'a> { it: Box::pin(stream), } } + + /// Returns the number of rows affected by the query. + /// + /// This function will return `None` until the iterator has been exhausted. 
+ pub fn rows_affected(&self) -> Option { + self.it.rows_affected() + } } impl FallibleIterator for RowIter<'_> { diff --git a/postgres/src/test.rs b/postgres/src/test.rs index 9edde8e32..0fd404574 100644 --- a/postgres/src/test.rs +++ b/postgres/src/test.rs @@ -1,4 +1,6 @@ use std::io::{Read, Write}; +use std::str::FromStr; +use std::sync::mpsc; use std::thread; use std::time::Duration; use tokio_postgres::error::SqlState; @@ -100,6 +102,31 @@ fn transaction_drop() { assert_eq!(rows.len(), 0); } +#[test] +fn transaction_drop_immediate_rollback() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + let mut client2 = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .simple_query("CREATE TABLE IF NOT EXISTS foo (id SERIAL PRIMARY KEY)") + .unwrap(); + + client + .execute("INSERT INTO foo VALUES (1) ON CONFLICT DO NOTHING", &[]) + .unwrap(); + + let mut transaction = client.transaction().unwrap(); + + transaction + .execute("SELECT * FROM foo FOR UPDATE", &[]) + .unwrap(); + + drop(transaction); + + let rows = client2.query("SELECT * FROM foo FOR UPDATE", &[]).unwrap(); + assert_eq!(rows.len(), 1); +} + #[test] fn nested_transactions() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); @@ -151,6 +178,57 @@ fn nested_transactions() { assert_eq!(rows[2].get::<_, i32>(0), 4); } +#[test] +fn savepoints() { + let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + + client + .batch_execute("CREATE TEMPORARY TABLE foo (id INT PRIMARY KEY)") + .unwrap(); + + let mut transaction = client.transaction().unwrap(); + + transaction + .execute("INSERT INTO foo (id) VALUES (1)", &[]) + .unwrap(); + + let mut savepoint1 = transaction.savepoint("savepoint1").unwrap(); + + savepoint1 + .execute("INSERT INTO foo (id) VALUES (2)", &[]) + .unwrap(); + + savepoint1.rollback().unwrap(); + + let rows = transaction + 
.query("SELECT id FROM foo ORDER BY id", &[]) + .unwrap(); + assert_eq!(rows.len(), 1); + assert_eq!(rows[0].get::<_, i32>(0), 1); + + let mut savepoint2 = transaction.savepoint("savepoint2").unwrap(); + + savepoint2 + .execute("INSERT INTO foo (id) VALUES(3)", &[]) + .unwrap(); + + let mut savepoint3 = savepoint2.savepoint("savepoint3").unwrap(); + + savepoint3 + .execute("INSERT INTO foo (id) VALUES(4)", &[]) + .unwrap(); + + savepoint3.commit().unwrap(); + savepoint2.commit().unwrap(); + transaction.commit().unwrap(); + + let rows = client.query("SELECT id FROM foo ORDER BY id", &[]).unwrap(); + assert_eq!(rows.len(), 3); + assert_eq!(rows[0].get::<_, i32>(0), 1); + assert_eq!(rows[1].get::<_, i32>(0), 3); + assert_eq!(rows[2].get::<_, i32>(0), 4); +} + #[test] fn copy_in() { let mut client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); @@ -399,3 +477,34 @@ fn notifications_timeout_iter() { assert_eq!(notifications[0].payload(), "hello"); assert_eq!(notifications[1].payload(), "world"); } + +#[test] +fn notice_callback() { + let (notice_tx, notice_rx) = mpsc::sync_channel(64); + let mut client = Config::from_str("host=localhost port=5433 user=postgres") + .unwrap() + .notice_callback(move |n| notice_tx.send(n).unwrap()) + .connect(NoTls) + .unwrap(); + + client + .batch_execute("DO $$BEGIN RAISE NOTICE 'custom'; END$$") + .unwrap(); + + assert_eq!(notice_rx.recv().unwrap().message(), "custom"); +} + +#[test] +fn explicit_close() { + let client = Client::connect("host=localhost port=5433 user=postgres", NoTls).unwrap(); + client.close().unwrap(); +} + +#[test] +fn check_send() { + fn is_send() {} + + is_send::(); + is_send::(); + is_send::>(); +} diff --git a/postgres/src/transaction.rs b/postgres/src/transaction.rs index 25bfff578..8126b1dbe 100644 --- a/postgres/src/transaction.rs +++ b/postgres/src/transaction.rs @@ -1,6 +1,6 @@ use crate::connection::ConnectionRef; use crate::{CancelToken, CopyInWriter, CopyOutReader, Portal, 
RowIter, Statement, ToStatement}; -use tokio_postgres::types::{ToSql, Type}; +use tokio_postgres::types::{BorrowToSql, ToSql, Type}; use tokio_postgres::{Error, Row, SimpleQueryMessage}; /// A representation of a PostgreSQL database transaction. @@ -9,7 +9,15 @@ use tokio_postgres::{Error, Row, SimpleQueryMessage}; /// in the transaction. Transactions can be nested, with inner transactions implemented via savepoints. pub struct Transaction<'a> { connection: ConnectionRef<'a>, - transaction: tokio_postgres::Transaction<'a>, + transaction: Option>, +} + +impl Drop for Transaction<'_> { + fn drop(&mut self) { + if let Some(transaction) = self.transaction.take() { + let _ = self.connection.block_on(transaction.rollback()); + } + } } impl<'a> Transaction<'a> { @@ -19,31 +27,38 @@ impl<'a> Transaction<'a> { ) -> Transaction<'a> { Transaction { connection, - transaction, + transaction: Some(transaction), } } /// Consumes the transaction, committing all changes made within it. pub fn commit(mut self) -> Result<(), Error> { - self.connection.block_on(self.transaction.commit()) + self.connection + .block_on(self.transaction.take().unwrap().commit()) } /// Rolls the transaction back, discarding all changes made within it. /// /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. pub fn rollback(mut self) -> Result<(), Error> { - self.connection.block_on(self.transaction.rollback()) + self.connection + .block_on(self.transaction.take().unwrap().rollback()) } /// Like `Client::prepare`. pub fn prepare(&mut self, query: &str) -> Result { - self.connection.block_on(self.transaction.prepare(query)) + self.connection + .block_on(self.transaction.as_ref().unwrap().prepare(query)) } /// Like `Client::prepare_typed`. 
pub fn prepare_typed(&mut self, query: &str, types: &[Type]) -> Result { - self.connection - .block_on(self.transaction.prepare_typed(query, types)) + self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .prepare_typed(query, types), + ) } /// Like `Client::execute`. @@ -52,7 +67,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.execute(query, params)) + .block_on(self.transaction.as_ref().unwrap().execute(query, params)) } /// Like `Client::query`. @@ -61,7 +76,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.query(query, params)) + .block_on(self.transaction.as_ref().unwrap().query(query, params)) } /// Like `Client::query_one`. @@ -70,7 +85,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.query_one(query, params)) + .block_on(self.transaction.as_ref().unwrap().query_one(query, params)) } /// Like `Client::query_opt`. @@ -83,19 +98,49 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.query_opt(query, params)) + .block_on(self.transaction.as_ref().unwrap().query_opt(query, params)) } /// Like `Client::query_raw`. - pub fn query_raw<'b, T, I>(&mut self, query: &T, params: I) -> Result, Error> + pub fn query_raw(&mut self, query: &T, params: I) -> Result, Error> where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let stream = self .connection - .block_on(self.transaction.query_raw(query, params))?; + .block_on(self.transaction.as_ref().unwrap().query_raw(query, params))?; + Ok(RowIter::new(self.connection.as_ref(), stream)) + } + + /// Like `Client::query_typed`. 
+ pub fn query_typed( + &mut self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .query_typed(statement, params), + ) + } + + /// Like `Client::query_typed_raw`. + pub fn query_typed_raw(&mut self, query: &str, params: I) -> Result, Error> + where + P: BorrowToSql, + I: IntoIterator, + { + let stream = self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .query_typed_raw(query, params), + )?; Ok(RowIter::new(self.connection.as_ref(), stream)) } @@ -114,7 +159,7 @@ impl<'a> Transaction<'a> { T: ?Sized + ToStatement, { self.connection - .block_on(self.transaction.bind(query, params)) + .block_on(self.transaction.as_ref().unwrap().bind(query, params)) } /// Continues execution of a portal, returning the next set of rows. @@ -122,8 +167,12 @@ impl<'a> Transaction<'a> { /// Unlike `query`, portals can be incrementally evaluated by limiting the number of rows returned in each call to /// `query_portal`. If the requested number is negative or 0, all remaining rows will be returned. pub fn query_portal(&mut self, portal: &Portal, max_rows: i32) -> Result, Error> { - self.connection - .block_on(self.transaction.query_portal(portal, max_rows)) + self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .query_portal(portal, max_rows), + ) } /// The maximally flexible version of `query_portal`. 
@@ -132,9 +181,12 @@ impl<'a> Transaction<'a> { portal: &Portal, max_rows: i32, ) -> Result, Error> { - let stream = self - .connection - .block_on(self.transaction.query_portal_raw(portal, max_rows))?; + let stream = self.connection.block_on( + self.transaction + .as_ref() + .unwrap() + .query_portal_raw(portal, max_rows), + )?; Ok(RowIter::new(self.connection.as_ref(), stream)) } @@ -143,7 +195,9 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let sink = self.connection.block_on(self.transaction.copy_in(query))?; + let sink = self + .connection + .block_on(self.transaction.as_ref().unwrap().copy_in(query))?; Ok(CopyInWriter::new(self.connection.as_ref(), sink)) } @@ -152,33 +206,45 @@ impl<'a> Transaction<'a> { where T: ?Sized + ToStatement, { - let stream = self.connection.block_on(self.transaction.copy_out(query))?; + let stream = self + .connection + .block_on(self.transaction.as_ref().unwrap().copy_out(query))?; Ok(CopyOutReader::new(self.connection.as_ref(), stream)) } /// Like `Client::simple_query`. pub fn simple_query(&mut self, query: &str) -> Result, Error> { self.connection - .block_on(self.transaction.simple_query(query)) + .block_on(self.transaction.as_ref().unwrap().simple_query(query)) } /// Like `Client::batch_execute`. pub fn batch_execute(&mut self, query: &str) -> Result<(), Error> { self.connection - .block_on(self.transaction.batch_execute(query)) + .block_on(self.transaction.as_ref().unwrap().batch_execute(query)) } /// Like `Client::cancel_token`. pub fn cancel_token(&self) -> CancelToken { - CancelToken::new(self.transaction.cancel_token()) + CancelToken::new(self.transaction.as_ref().unwrap().cancel_token()) } - /// Like `Client::transaction`. + /// Like `Client::transaction`, but creates a nested transaction via a savepoint. 
pub fn transaction(&mut self) -> Result, Error> { - let transaction = self.connection.block_on(self.transaction.transaction())?; - Ok(Transaction { - connection: self.connection.as_ref(), - transaction, - }) + let transaction = self + .connection + .block_on(self.transaction.as_mut().unwrap().transaction())?; + Ok(Transaction::new(self.connection.as_ref(), transaction)) + } + + /// Like `Client::transaction`, but creates a nested transaction via a savepoint with the specified name. + pub fn savepoint(&mut self, name: I) -> Result, Error> + where + I: Into, + { + let transaction = self + .connection + .block_on(self.transaction.as_mut().unwrap().savepoint(name))?; + Ok(Transaction::new(self.connection.as_ref(), transaction)) } } diff --git a/tokio-postgres/CHANGELOG.md b/tokio-postgres/CHANGELOG.md index e65f00f14..a67f69ea7 100644 --- a/tokio-postgres/CHANGELOG.md +++ b/tokio-postgres/CHANGELOG.md @@ -1,5 +1,179 @@ # Change Log +## Unreleased + +## v0.7.13 - 2025-02-02 + +### Added + +* Added support for direct TLS negotiation. +* Added support for `cidr` 0.3 via the `with-cidr-0_3` feature. + +### Fixes + +* Added `load_balance_hosts` to `Config`'s `Debug` implementation. + +### Changes + +* Upgraded `rand`. + +## v0.7.12 - 2024-09-15 + +### Fixed + +* Fixed `query_typed` queries that return no rows. + +### Added + +* Added support for `jiff` 0.1 via the `with-jiff-01` feature. +* Added support for TCP keepalive on AIX. + +## v0.7.11 - 2024-07-21 + +### Fixed + +* Fixed handling of non-UTF8 error fields which can be sent after failed handshakes. +* Fixed cancellation handling of `TransactionBuilder::start` futures. + +### Added + +* Added `table_oid` and `field_id` fields to `Columns` struct of prepared statements. +* Added `GenericClient::simple_query`. +* Added `#[track_caller]` to `Row::get` and `SimpleQueryRow::get`. +* Added `TargetSessionAttrs::ReadOnly`. +* Added `Debug` implementation for `Statement`. +* Added `Clone` implementation for `Row`. 
+* Added `SimpleQueryMessage::RowDescription`. +* Added `{Client, Transaction, GenericClient}::query_typed`. + +### Changed + +* Disable `rustc-serialize` compatibility of `eui48-1` dependency. +* Config setters now take `impl Into<String>`. + +## v0.7.10 - 2023-08-25 + +### Fixed + +* Deferred default username lookup to avoid regressing `Config` behavior. + +## v0.7.9 - 2023-08-19 + +### Fixed + +* Fixed builds on OpenBSD. + +### Added + +* Added the `js` feature for WASM support. +* Added support for the `hostaddr` config option to bypass DNS lookups. +* Added support for the `load_balance_hosts` config option to randomize connection ordering. +* The `user` config option now defaults to the executing process's user. + +## v0.7.8 - 2023-05-27 + +### Added + +* Added `keepalives_interval` and `keepalives_retries` config options. +* Added new `SqlState` variants. +* Added more `Debug` impls. +* Added `GenericClient::batch_execute`. +* Added `RowStream::rows_affected`. +* Added the `tcp_user_timeout` config option. + +### Changed + +* Passing an incorrect number of parameters to a query method now returns an error instead of panicking. +* Upgraded `socket2`. + +## v0.7.7 - 2022-08-21 + +### Added + +* Added `ToSql` and `FromSql` implementations for `[u8; N]` via the `array-impls` feature. +* Added support for `smol_str` 0.1 via the `with-smol_str-01` feature. +* Added `ToSql::encode_format` to support text encodings of parameters. + +## v0.7.6 - 2022-04-30 + +### Added + +* Added support for `uuid` 1.0 via the `with-uuid-1` feature. + +### Changed + +* Upgraded to `tokio-util` 0.7. +* Upgraded to `parking_lot` 0.12. + +## v0.7.5 - 2021-10-29 + +### Fixed + +* Fixed a bug where the client could enter into a transaction if the `Client::transaction` future was dropped before completion. + +## v0.7.4 - 2021-10-19 + +### Fixed + +* Fixed reporting of commit-time errors triggered by deferred constraints.
+ +## v0.7.3 - 2021-09-29 + +### Fixed + +* Fixed a deadlock when pipelined requests concurrently prepare cached typeinfo queries. + +### Added + +* Added `SimpleQueryRow::columns`. +* Added support for `eui48` 1.0 via the `with-eui48-1` feature. +* Added `FromSql` and `ToSql` implementations for arrays via the `array-impls` feature. +* Added support for `time` 0.3 via the `with-time-0_3` feature. + +## v0.7.2 - 2021-04-25 + +### Fixed + +* `SqlState` constants can now be used in `match` patterns. + +## v0.7.1 - 2021-04-03 + +### Added + +* Added support for `geo-types` 0.7 via `with-geo-types-0_7` feature. +* Added `Client::clear_type_cache`. +* Added `Error::as_db_error` and `Error::is_closed`. + +## v0.7.0 - 2020-12-25 + +### Changed + +* Upgraded to `tokio` 1.0. +* Upgraded to `postgres-types` 0.2. + +### Added + +* Methods taking iterators of `ToSql` values can now take both `&dyn ToSql` and `T: ToSql` values. + +## v0.6.0 - 2020-10-17 + +### Changed + +* Upgraded to `tokio` 0.3. +* Added the detail and hint fields to `DbError`'s `Display` implementation. + +## v0.5.5 - 2020-07-03 + +### Added + +* Added support for `geo-types` 0.6. + +## v0.5.4 - 2020-05-01 + +### Added + +* Added `Transaction::savepoint`, which can be used to create a savepoint with a custom name. 
+ ## v0.5.3 - 2020-03-05 ### Added diff --git a/tokio-postgres/Cargo.toml b/tokio-postgres/Cargo.toml index d1571c352..f969ae5b7 100644 --- a/tokio-postgres/Cargo.toml +++ b/tokio-postgres/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "tokio-postgres" -version = "0.5.3" +version = "0.7.13" authors = ["Steven Fackler "] edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" description = "A native, asynchronous PostgreSQL client" repository = "https://github.com/sfackler/rust-postgres" readme = "../README.md" @@ -25,43 +25,72 @@ circle-ci = { repository = "sfackler/rust-postgres" } [features] default = ["runtime"] -runtime = ["tokio/dns", "tokio/net", "tokio/time"] +runtime = ["tokio/net", "tokio/time"] +array-impls = ["postgres-types/array-impls"] with-bit-vec-0_6 = ["postgres-types/with-bit-vec-0_6"] with-chrono-0_4 = ["postgres-types/with-chrono-0_4"] +with-cidr-0_2 = ["postgres-types/with-cidr-0_2"] +with-cidr-0_3 = ["postgres-types/with-cidr-0_3"] with-eui48-0_4 = ["postgres-types/with-eui48-0_4"] -with-geo-types-0_4 = ["postgres-types/with-geo-types-0_4"] +with-eui48-1 = ["postgres-types/with-eui48-1"] +with-geo-types-0_6 = ["postgres-types/with-geo-types-0_6"] +with-geo-types-0_7 = ["postgres-types/with-geo-types-0_7"] +with-jiff-0_1 = ["postgres-types/with-jiff-0_1"] +with-jiff-0_2 = ["postgres-types/with-jiff-0_2"] with-serde_json-1 = ["postgres-types/with-serde_json-1"] +with-smol_str-01 = ["postgres-types/with-smol_str-01"] with-uuid-0_8 = ["postgres-types/with-uuid-0_8"] +with-uuid-1 = ["postgres-types/with-uuid-1"] with-time-0_2 = ["postgres-types/with-time-0_2"] +with-time-0_3 = ["postgres-types/with-time-0_3"] +js = ["postgres-protocol/js", "postgres-types/js"] [dependencies] async-trait = "0.1" -bytes = "0.5" +bytes = "1.0" byteorder = "1.0" fallible-iterator = "0.2" -futures = "0.3" +futures-channel = { version = "0.3", features = ["sink"] } +futures-util = { version = "0.3", features = ["sink"] } log = "0.4" -parking_lot = "0.10" 
+parking_lot = "0.12" percent-encoding = "2.0" -pin-project-lite = "0.1" -phf = "0.8" -postgres-protocol = { version = "0.5.0", path = "../postgres-protocol" } -postgres-types = { version = "0.1.1", path = "../postgres-types" } -tokio = { version = "0.2", features = ["io-util"] } -tokio-util = { version = "0.3", features = ["codec"] } +pin-project-lite = "0.2" +phf = "0.11" +postgres-protocol = { version = "0.6.8", path = "../postgres-protocol" } +postgres-types = { version = "0.2.9", path = "../postgres-types" } +tokio = { version = "1.27", features = ["io-util"] } +tokio-util = { version = "0.7", features = ["codec"] } +rand = "0.9.0" +whoami = "1.4.1" + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +socket2 = { version = "0.5", features = ["all"] } [dev-dependencies] -tokio = { version = "0.2", features = ["full"] } -env_logger = "0.7" -criterion = "0.3" +futures-executor = "0.3" +criterion = "0.6" +env_logger = "0.11" +tokio = { version = "1.0", features = [ + "macros", + "net", + "rt", + "rt-multi-thread", + "time", +] } bit-vec-06 = { version = "0.6", package = "bit-vec" } -chrono-04 = { version = "0.4", package = "chrono" } -eui48-04 = { version = "0.4", package = "eui48" } -geo-types-04 = { version = "0.4", package = "geo-types" } +chrono-04 = { version = "0.4", package = "chrono", default-features = false } +eui48-1 = { version = "1.0", package = "eui48", default-features = false } +geo-types-06 = { version = "0.6", package = "geo-types" } +geo-types-07 = { version = "0.7", package = "geo-types" } +jiff-01 = { version = "0.1", package = "jiff" } +jiff-02 = { version = "0.2", package = "jiff" } serde-1 = { version = "1.0", package = "serde" } serde_json-1 = { version = "1.0", package = "serde_json" } +smol_str-01 = { version = "0.1", package = "smol_str" } uuid-08 = { version = "0.8", package = "uuid" } +uuid-1 = { version = "1.0", package = "uuid" } time-02 = { version = "0.2", package = "time" } - +time-03 = { version = "0.3", package = 
"time", features = ["parsing"] } diff --git a/tokio-postgres/LICENSE-APACHE b/tokio-postgres/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/tokio-postgres/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, 
in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/tokio-postgres/LICENSE-APACHE b/tokio-postgres/LICENSE-APACHE new file mode 120000 index 000000000..965b606f3 --- /dev/null +++ b/tokio-postgres/LICENSE-APACHE @@ -0,0 +1 @@ +../LICENSE-APACHE \ No newline at end of file diff --git a/tokio-postgres/LICENSE-MIT b/tokio-postgres/LICENSE-MIT deleted file mode 100644 index 71803aea1..000000000 --- a/tokio-postgres/LICENSE-MIT +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Steven Fackler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/tokio-postgres/LICENSE-MIT b/tokio-postgres/LICENSE-MIT new file mode 120000 index 000000000..76219eb72 --- /dev/null +++ b/tokio-postgres/LICENSE-MIT @@ -0,0 +1 @@ +../LICENSE-MIT \ No newline at end of file diff --git a/tokio-postgres/benches/bench.rs b/tokio-postgres/benches/bench.rs index 315bea8e4..a8f9b5f1a 100644 --- a/tokio-postgres/benches/bench.rs +++ b/tokio-postgres/benches/bench.rs @@ -1,13 +1,12 @@ use criterion::{criterion_group, criterion_main, Criterion}; -use futures::channel::oneshot; -use futures::executor; +use futures_channel::oneshot; use std::sync::Arc; use std::time::Instant; use tokio::runtime::Runtime; use tokio_postgres::{Client, NoTls}; fn setup() -> (Client, Runtime) { - let mut runtime = Runtime::new().unwrap(); + let runtime = Runtime::new().unwrap(); let (client, conn) = runtime .block_on(tokio_postgres::connect( "host=localhost port=5433 user=postgres", @@ -19,7 +18,7 @@ fn setup() -> (Client, Runtime) { } fn query_prepared(c: &mut Criterion) { - let (client, mut runtime) = setup(); + let (client, runtime) = setup(); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("runtime_block_on", move |b| { b.iter(|| { @@ -29,13 +28,13 @@ fn query_prepared(c: &mut Criterion) { }) }); - let (client, mut runtime) = setup(); + let (client, runtime) = setup(); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("executor_block_on", move |b| { - b.iter(|| executor::block_on(client.query(&statement, &[&1i64])).unwrap()) + b.iter(|| futures_executor::block_on(client.query(&statement, &[&1i64])).unwrap()) }); - let (client, mut runtime) = setup(); + let (client, runtime) = setup(); let client = Arc::new(client); let statement = runtime.block_on(client.prepare("SELECT $1::INT8")).unwrap(); c.bench_function("spawned", move |b| { @@ -50,7 +49,7 @@ fn query_prepared(c: &mut Criterion) { } tx.send(start.elapsed()).unwrap(); }); - 
executor::block_on(rx).unwrap() + futures_executor::block_on(rx).unwrap() }) }); } diff --git a/tokio-postgres/src/binary_copy.rs b/tokio-postgres/src/binary_copy.rs index 231f202d8..dab141663 100644 --- a/tokio-postgres/src/binary_copy.rs +++ b/tokio-postgres/src/binary_copy.rs @@ -4,8 +4,9 @@ use crate::types::{FromSql, IsNull, ToSql, Type, WrongType}; use crate::{slice_iter, CopyInSink, CopyOutStream, Error}; use byteorder::{BigEndian, ByteOrder}; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use futures::{ready, SinkExt, Stream}; +use futures_util::{ready, SinkExt, Stream}; use pin_project_lite::pin_project; +use postgres_types::BorrowToSql; use std::convert::TryFrom; use std::io; use std::io::Cursor; @@ -58,9 +59,10 @@ impl BinaryCopyInWriter { /// # Panics /// /// Panics if the number of values provided does not match the number expected. - pub async fn write_raw<'a, I>(self: Pin<&mut Self>, values: I) -> Result<(), Error> + pub async fn write_raw(self: Pin<&mut Self>, values: I) -> Result<(), Error> where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let mut this = self.project(); @@ -79,6 +81,7 @@ impl BinaryCopyInWriter { let idx = this.buf.len(); this.buf.put_i32(0); let len = match value + .borrow_to_sql() .to_sql_checked(type_, this.buf) .map_err(|e| Error::to_sql(e, i))? 
{ @@ -150,7 +153,7 @@ impl Stream for BinaryCopyOutStream { Some(header) => header.has_oids, None => { check_remaining(&chunk, HEADER_LEN)?; - if &chunk.bytes()[..MAGIC.len()] != MAGIC { + if !chunk.chunk().starts_with(MAGIC) { return Poll::Ready(Some(Err(Error::parse(io::Error::new( io::ErrorKind::InvalidData, "invalid magic value", diff --git a/tokio-postgres/src/bind.rs b/tokio-postgres/src/bind.rs index 69823a9ab..9c5c49218 100644 --- a/tokio-postgres/src/bind.rs +++ b/tokio-postgres/src/bind.rs @@ -1,7 +1,7 @@ use crate::client::InnerClient; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::types::ToSql; +use crate::types::BorrowToSql; use crate::{query, Error, Portal, Statement}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -10,13 +10,14 @@ use std::sync::Arc; static NEXT_ID: AtomicUsize = AtomicUsize::new(0); -pub async fn bind<'a, I>( +pub async fn bind( client: &Arc, statement: Statement, params: I, ) -> Result where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let name = format!("p{}", NEXT_ID.fetch_add(1, Ordering::SeqCst)); diff --git a/tokio-postgres/src/cancel_query.rs b/tokio-postgres/src/cancel_query.rs index d7bb50474..2dfd47c06 100644 --- a/tokio-postgres/src/cancel_query.rs +++ b/tokio-postgres/src/cancel_query.rs @@ -1,5 +1,5 @@ use crate::client::SocketConfig; -use crate::config::{Host, SslMode}; +use crate::config::{SslMode, SslNegotiation}; use crate::tls::MakeTlsConnect; use crate::{cancel_query_raw, connect_socket, Error, Socket}; use std::io; @@ -7,6 +7,7 @@ use std::io; pub(crate) async fn cancel_query( config: Option, ssl_mode: SslMode, + ssl_negotiation: SslNegotiation, mut tls: T, process_id: i32, secret_key: i32, @@ -24,24 +25,28 @@ where } }; - let hostname = match &config.host { - Host::Tcp(host) => &**host, - // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter - 
#[cfg(unix)] - Host::Unix(_) => "", - }; let tls = tls - .make_tls_connect(hostname) + .make_tls_connect(config.hostname.as_deref().unwrap_or("")) .map_err(|e| Error::tls(e.into()))?; + let has_hostname = config.hostname.is_some(); let socket = connect_socket::connect_socket( - &config.host, + &config.addr, config.port, config.connect_timeout, - config.keepalives, - config.keepalives_idle, + config.tcp_user_timeout, + config.keepalive.as_ref(), ) .await?; - cancel_query_raw::cancel_query_raw(socket, ssl_mode, tls, process_id, secret_key).await + cancel_query_raw::cancel_query_raw( + socket, + ssl_mode, + ssl_negotiation, + tls, + has_hostname, + process_id, + secret_key, + ) + .await } diff --git a/tokio-postgres/src/cancel_query_raw.rs b/tokio-postgres/src/cancel_query_raw.rs index c89dc581f..886606497 100644 --- a/tokio-postgres/src/cancel_query_raw.rs +++ b/tokio-postgres/src/cancel_query_raw.rs @@ -1,4 +1,4 @@ -use crate::config::SslMode; +use crate::config::{SslMode, SslNegotiation}; use crate::tls::TlsConnect; use crate::{connect_tls, Error}; use bytes::BytesMut; @@ -8,7 +8,9 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; pub async fn cancel_query_raw( stream: S, mode: SslMode, + negotiation: SslNegotiation, tls: T, + has_hostname: bool, process_id: i32, secret_key: i32, ) -> Result<(), Error> @@ -16,7 +18,7 @@ where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - let mut stream = connect_tls::connect_tls(stream, mode, tls).await?; + let mut stream = connect_tls::connect_tls(stream, mode, negotiation, tls, has_hostname).await?; let mut buf = BytesMut::new(); frontend::cancel_request(process_id, secret_key, &mut buf); diff --git a/tokio-postgres/src/cancel_token.rs b/tokio-postgres/src/cancel_token.rs index d048a3c82..1652bec72 100644 --- a/tokio-postgres/src/cancel_token.rs +++ b/tokio-postgres/src/cancel_token.rs @@ -1,4 +1,4 @@ -use crate::config::SslMode; +use crate::config::{SslMode, SslNegotiation}; use crate::tls::TlsConnect; 
#[cfg(feature = "runtime")] use crate::{cancel_query, client::SocketConfig, tls::MakeTlsConnect, Socket}; @@ -12,6 +12,7 @@ pub struct CancelToken { #[cfg(feature = "runtime")] pub(crate) socket_config: Option, pub(crate) ssl_mode: SslMode, + pub(crate) ssl_negotiation: SslNegotiation, pub(crate) process_id: i32, pub(crate) secret_key: i32, } @@ -37,6 +38,7 @@ impl CancelToken { cancel_query::cancel_query( self.socket_config.clone(), self.ssl_mode, + self.ssl_negotiation, tls, self.process_id, self.secret_key, @@ -54,7 +56,9 @@ impl CancelToken { cancel_query_raw::cancel_query_raw( stream, self.ssl_mode, + self.ssl_negotiation, tls, + true, self.process_id, self.secret_key, ) diff --git a/tokio-postgres/src/client.rs b/tokio-postgres/src/client.rs index 2d9b79728..b38bbba37 100644 --- a/tokio-postgres/src/client.rs +++ b/tokio-postgres/src/client.rs @@ -1,7 +1,9 @@ use crate::codec::BackendMessages; -use crate::config::{Host, SslMode}; +use crate::config::{SslMode, SslNegotiation}; use crate::connection::{Request, RequestMessages}; use crate::copy_out::CopyOutStream; +#[cfg(feature = "runtime")] +use crate::keepalive::KeepaliveConfig; use crate::query::RowStream; use crate::simple_query::SimpleQueryStream; #[cfg(feature = "runtime")] @@ -16,14 +18,20 @@ use crate::{ }; use bytes::{Buf, BytesMut}; use fallible_iterator::FallibleIterator; -use futures::channel::mpsc; -use futures::{future, pin_mut, ready, StreamExt, TryStreamExt}; +use futures_channel::mpsc; +use futures_util::{future, pin_mut, ready, StreamExt, TryStreamExt}; use parking_lot::Mutex; use postgres_protocol::message::backend::Message; +use postgres_types::BorrowToSql; use std::collections::HashMap; use std::fmt; +#[cfg(feature = "runtime")] +use std::net::IpAddr; +#[cfg(feature = "runtime")] +use std::path::PathBuf; use std::sync::Arc; use std::task::{Context, Poll}; +#[cfg(feature = "runtime")] use std::time::Duration; use tokio::io::{AsyncRead, AsyncWrite}; @@ -53,17 +61,32 @@ impl Responses { } } 
-struct State { +/// A cache of type info and prepared statements for fetching type info +/// (corresponding to the queries in the [prepare](prepare) module). +#[derive(Default)] +struct CachedTypeInfo { + /// A statement for basic information for a type from its + /// OID. Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_QUERY) (or its + /// fallback). typeinfo: Option, + /// A statement for getting information for a composite type from its OID. + /// Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_COMPOSITE_QUERY). typeinfo_composite: Option, + /// A statement for getting information for a composite type from its OID. + /// Corresponds to [TYPEINFO_QUERY](prepare::TYPEINFO_COMPOSITE_QUERY) (or + /// its fallback). typeinfo_enum: Option, + + /// Cache of types already looked up. types: HashMap, - buf: BytesMut, } pub struct InnerClient { sender: mpsc::UnboundedSender, - state: Mutex, + cached_typeinfo: Mutex, + + /// A buffer to use when writing out postgres commands. + buffer: Mutex, } impl InnerClient { @@ -81,55 +104,71 @@ impl InnerClient { } pub fn typeinfo(&self) -> Option { - self.state.lock().typeinfo.clone() + self.cached_typeinfo.lock().typeinfo.clone() } pub fn set_typeinfo(&self, statement: &Statement) { - self.state.lock().typeinfo = Some(statement.clone()); + self.cached_typeinfo.lock().typeinfo = Some(statement.clone()); } pub fn typeinfo_composite(&self) -> Option { - self.state.lock().typeinfo_composite.clone() + self.cached_typeinfo.lock().typeinfo_composite.clone() } pub fn set_typeinfo_composite(&self, statement: &Statement) { - self.state.lock().typeinfo_composite = Some(statement.clone()); + self.cached_typeinfo.lock().typeinfo_composite = Some(statement.clone()); } pub fn typeinfo_enum(&self) -> Option { - self.state.lock().typeinfo_enum.clone() + self.cached_typeinfo.lock().typeinfo_enum.clone() } pub fn set_typeinfo_enum(&self, statement: &Statement) { - self.state.lock().typeinfo_enum = Some(statement.clone()); + 
self.cached_typeinfo.lock().typeinfo_enum = Some(statement.clone()); } pub fn type_(&self, oid: Oid) -> Option { - self.state.lock().types.get(&oid).cloned() + self.cached_typeinfo.lock().types.get(&oid).cloned() } pub fn set_type(&self, oid: Oid, type_: &Type) { - self.state.lock().types.insert(oid, type_.clone()); + self.cached_typeinfo.lock().types.insert(oid, type_.clone()); + } + + pub fn clear_type_cache(&self) { + self.cached_typeinfo.lock().types.clear(); } + /// Call the given function with a buffer to be used when writing out + /// postgres commands. pub fn with_buf(&self, f: F) -> R where F: FnOnce(&mut BytesMut) -> R, { - let mut state = self.state.lock(); - let r = f(&mut state.buf); - state.buf.clear(); + let mut buffer = self.buffer.lock(); + let r = f(&mut buffer); + buffer.clear(); r } } +#[cfg(feature = "runtime")] #[derive(Clone)] pub(crate) struct SocketConfig { - pub host: Host, + pub addr: Addr, + pub hostname: Option, pub port: u16, pub connect_timeout: Option, - pub keepalives: bool, - pub keepalives_idle: Duration, + pub tcp_user_timeout: Option, + pub keepalive: Option, +} + +#[cfg(feature = "runtime")] +#[derive(Clone)] +pub(crate) enum Addr { + Tcp(IpAddr), + #[cfg(unix)] + Unix(PathBuf), } /// An asynchronous PostgreSQL client. 
@@ -141,6 +180,7 @@ pub struct Client { #[cfg(feature = "runtime")] socket_config: Option, ssl_mode: SslMode, + ssl_negotiation: SslNegotiation, process_id: i32, secret_key: i32, } @@ -149,23 +189,20 @@ impl Client { pub(crate) fn new( sender: mpsc::UnboundedSender, ssl_mode: SslMode, + ssl_negotiation: SslNegotiation, process_id: i32, secret_key: i32, ) -> Client { Client { inner: Arc::new(InnerClient { sender, - state: Mutex::new(State { - typeinfo: None, - typeinfo_composite: None, - typeinfo_enum: None, - types: HashMap::new(), - buf: BytesMut::new(), - }), + cached_typeinfo: Default::default(), + buffer: Default::default(), }), #[cfg(feature = "runtime")] socket_config: None, ssl_mode, + ssl_negotiation, process_id, secret_key, } @@ -208,10 +245,6 @@ impl Client { /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. pub async fn query( &self, statement: &T, @@ -236,10 +269,6 @@ impl Client { /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. pub async fn query_one( &self, statement: &T, @@ -248,19 +277,9 @@ impl Client { where T: ?Sized + ToStatement, { - let stream = self.query_raw(statement, slice_iter(params)).await?; - pin_mut!(stream); - - let row = match stream.try_next().await? 
{ - Some(row) => row, - None => return Err(Error::row_count()), - }; - - if stream.try_next().await?.is_some() { - return Err(Error::row_count()); - } - - Ok(row) + self.query_opt(statement, params) + .await + .and_then(|res| res.ok_or_else(Error::row_count)) } /// Executes a statements which returns zero or one rows, returning it. @@ -273,10 +292,6 @@ impl Client { /// The `statement` argument can either be a `Statement`, or a raw query string. If the same statement will be /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. pub async fn query_opt( &self, statement: &T, @@ -288,16 +303,22 @@ impl Client { let stream = self.query_raw(statement, slice_iter(params)).await?; pin_mut!(stream); - let row = match stream.try_next().await? { - Some(row) => row, - None => return Ok(None), - }; + let mut first = None; + + // Originally this was two calls to `try_next().await?`, + // once for the first element, and second to error if more than one. + // + // However, this new form with only one .await in a loop generates + // slightly smaller codegen/stack usage for the resulting future. + while let Some(row) = stream.try_next().await? { + if first.is_some() { + return Err(Error::row_count()); + } - if stream.try_next().await?.is_some() { - return Err(Error::row_count()); + first = Some(row); } - Ok(Some(row)) + Ok(first) } /// The maximally flexible version of [`query`]. @@ -309,21 +330,13 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. 
- /// /// [`query`]: #method.query /// /// # Examples /// - /// If you have a type like `Vec` where `T: ToSql` Rust will not know how to use it as params. To get around - /// this the type must explicitly be converted to `&dyn ToSql`. - /// /// ```no_run /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { - /// use tokio_postgres::types::ToSql; - /// use futures::{pin_mut, TryStreamExt}; + /// use futures_util::{pin_mut, TryStreamExt}; /// /// let params: Vec = vec![ /// "first param".into(), @@ -331,7 +344,7 @@ impl Client { /// ]; /// let mut it = client.query_raw( /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", - /// params.iter().map(|p| p as &dyn ToSql), + /// params, /// ).await?; /// /// pin_mut!(it); @@ -342,16 +355,81 @@ impl Client { /// # Ok(()) /// # } /// ``` - pub async fn query_raw<'a, T, I>(&self, statement: &T, params: I) -> Result + pub async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let statement = statement.__convert().into_statement(self).await?; query::query(&self.inner, statement, params).await } + /// Like `query`, but requires the types of query parameters to be explicitly specified. + /// + /// Compared to `query`, this method allows performing queries without three round trips (for + /// prepare, execute, and close) by requiring the caller to specify parameter values along with + /// their Postgres type. Thus, this is suitable in environments where prepared statements aren't + /// supported (such as Cloudflare Workers with Hyperdrive). + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the + /// parameter of the list provided, 1-indexed. 
+ pub async fn query_typed( + &self, + query: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_typed_raw(query, params.iter().map(|(v, t)| (*v, t.clone()))) + .await? + .try_collect() + .await + } + + /// The maximally flexible version of [`query_typed`]. + /// + /// Compared to `query`, this method allows performing queries without three round trips (for + /// prepare, execute, and close) by requiring the caller to specify parameter values along with + /// their Postgres type. Thus, this is suitable in environments where prepared statements aren't + /// supported (such as Cloudflare Workers with Hyperdrive). + /// + /// A statement may contain parameters, specified by `$n`, where `n` is the index of the + /// parameter of the list provided, 1-indexed. + /// + /// [`query_typed`]: #method.query_typed + /// + /// # Examples + /// + /// ```no_run + /// # async fn async_main(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> { + /// use futures_util::{pin_mut, TryStreamExt}; + /// use tokio_postgres::types::Type; + /// + /// let params: Vec<(String, Type)> = vec![ + /// ("first param".into(), Type::TEXT), + /// ("second param".into(), Type::TEXT), + /// ]; + /// let mut it = client.query_typed_raw( + /// "SELECT foo FROM bar WHERE biz = $1 AND baz = $2", + /// params, + /// ).await?; + /// + /// pin_mut!(it); + /// while let Some(row) = it.try_next().await? { + /// let foo: i32 = row.get("foo"); + /// println!("foo: {}", foo); + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn query_typed_raw(&self, query: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator, + { + query::query_typed(&self.inner, query, params).await + } + /// Executes a statement, returning the number of rows modified. /// /// A statement may contain parameters, specified by `$n`, where `n` is the index of the parameter of the list @@ -362,10 +440,6 @@ impl Client { /// with the `prepare` method. 
/// /// If the statement does not modify any rows (e.g. `SELECT`), 0 is returned. - /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. pub async fn execute( &self, statement: &T, @@ -386,15 +460,12 @@ impl Client { /// repeatedly executed (perhaps with different query parameters), consider preparing the statement up front /// with the `prepare` method. /// - /// # Panics - /// - /// Panics if the number of parameters provided does not match the number expected. - /// /// [`execute`]: #method.execute - pub async fn execute_raw<'a, T, I>(&self, statement: &T, params: I) -> Result + pub async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let statement = statement.__convert().into_statement(self).await?; @@ -405,10 +476,6 @@ impl Client { /// /// PostgreSQL does not support parameters in `COPY` statements, so this method does not take any. The copy *must* /// be explicitly completed via the `Sink::close` or `finish` methods. If it is not, the copy will be aborted. - /// - /// # Panics - /// - /// Panics if the statement contains parameters. pub async fn copy_in(&self, statement: &T) -> Result, Error> where T: ?Sized + ToStatement, @@ -421,10 +488,6 @@ impl Client { /// Executes a `COPY TO STDOUT` statement, returning a stream of the resulting data. /// /// PostgreSQL does not support parameters in `COPY` statements, so this method does not take any. - /// - /// # Panics - /// - /// Panics if the statement contains parameters. pub async fn copy_out(&self, statement: &T) -> Result where T: ?Sized + ToStatement, @@ -472,8 +535,7 @@ impl Client { /// /// The transaction will roll back by default - use the `commit` method to commit it. 
pub async fn transaction(&mut self) -> Result, Error> { - self.batch_execute("BEGIN").await?; - Ok(Transaction::new(self)) + self.build_transaction().start().await } /// Returns a builder for a transaction with custom settings. @@ -484,14 +546,14 @@ impl Client { TransactionBuilder::new(self) } - /// Constructs a cancellation token that can later be used to request - /// cancellation of a query running on the connection associated with - /// this client. + /// Constructs a cancellation token that can later be used to request cancellation of a query running on the + /// connection associated with this client. pub fn cancel_token(&self) -> CancelToken { CancelToken { #[cfg(feature = "runtime")] socket_config: self.socket_config.clone(), ssl_mode: self.ssl_mode, + ssl_negotiation: self.ssl_negotiation, process_id: self.process_id, secret_key: self.secret_key, } @@ -523,12 +585,26 @@ impl Client { self.cancel_token().cancel_query_raw(stream, tls).await } + /// Clears the client's type information cache. + /// + /// When user-defined types are used in a query, the client loads their definitions from the database and caches + /// them for the lifetime of the client. If those definitions are changed in the database, this method can be used + /// to flush the local cache and allow the new, updated definitions to be loaded. + pub fn clear_type_cache(&self) { + self.inner().clear_type_cache(); + } + /// Determines if the connection to the server has already closed. /// /// In that case, all future queries will fail. pub fn is_closed(&self) -> bool { self.inner.sender.is_closed() } + + #[doc(hidden)] + pub fn __private_api_close(&mut self) { + self.inner.sender.close_channel() + } } impl fmt::Debug for Client { diff --git a/tokio-postgres/src/config.rs b/tokio-postgres/src/config.rs index da171cc79..59edd8fe2 100644 --- a/tokio-postgres/src/config.rs +++ b/tokio-postgres/src/config.rs @@ -1,8 +1,12 @@ //! Connection configuration. 
+#![allow(clippy::doc_overindented_list_items)] + #[cfg(feature = "runtime")] use crate::connect::connect; use crate::connect_raw::connect_raw; +#[cfg(not(target_arch = "wasm32"))] +use crate::keepalive::KeepaliveConfig; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; use crate::tls::TlsConnect; @@ -12,6 +16,8 @@ use crate::{Client, Connection, Error}; use std::borrow::Cow; #[cfg(unix)] use std::ffi::OsStr; +use std::net::IpAddr; +use std::ops::Deref; #[cfg(unix)] use std::os::unix::ffi::OsStrExt; #[cfg(unix)] @@ -23,17 +29,19 @@ use std::{error, fmt, iter, mem}; use tokio::io::{AsyncRead, AsyncWrite}; /// Properties required of a session. -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[non_exhaustive] pub enum TargetSessionAttrs { /// No special properties are required. Any, /// The session must allow writes. ReadWrite, + /// The session allow only reads. + ReadOnly, } /// TLS configuration. -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[non_exhaustive] pub enum SslMode { /// Do not use TLS. @@ -44,8 +52,22 @@ pub enum SslMode { Require, } +/// TLS negotiation configuration +/// +/// See more information at +/// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNECT-SSLNEGOTIATION +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +#[non_exhaustive] +pub enum SslNegotiation { + /// Use PostgreSQL SslRequest for Ssl negotiation + #[default] + Postgres, + /// Start Ssl handshake without negotiation, only works for PostgreSQL 17+ + Direct, +} + /// Channel binding configuration. -#[derive(Debug, Copy, Clone, PartialEq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[non_exhaustive] pub enum ChannelBinding { /// Do not use channel binding. @@ -56,8 +78,18 @@ pub enum ChannelBinding { Require, } +/// Load balancing configuration. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[non_exhaustive] +pub enum LoadBalanceHosts { + /// Make connection attempts to hosts in the order provided. + Disable, + /// Make connection attempts to hosts in a random order. + Random, +} + /// A host specification. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum Host { /// A TCP hostname. Tcp(String), @@ -79,7 +111,7 @@ pub enum Host { /// /// ## Keys /// -/// * `user` - The username to authenticate with. Required. +/// * `user` - The username to authenticate with. Defaults to the user executing this process. /// * `password` - The password to authenticate with. /// * `dbname` - The name of the database to connect to. Defaults to the username. /// * `options` - Command line options used to configure the server. @@ -90,21 +122,56 @@ pub enum Host { /// path to the directory containing Unix domain sockets. Otherwise, it is treated as a hostname. Multiple hosts /// can be specified, separated by commas. Each host will be tried in turn when connecting. Required if connecting /// with the `connect` method. +/// * `sslnegotiation` - TLS negotiation method. If set to `direct`, the client +/// will perform direct TLS handshake, this only works for PostgreSQL 17 and +/// newer. +/// Note that you will need to setup ALPN of TLS client configuration to +/// `postgresql` when using direct TLS. If you are using postgres_openssl +/// as TLS backend, a `postgres_openssl::set_postgresql_alpn` helper is +/// provided for that. +/// If set to `postgres`, the default value, it follows original postgres +/// wire protocol to perform the negotiation. +/// * `hostaddr` - Numeric IP address of host to connect to. This should be in the standard IPv4 address format, +/// e.g., 172.28.40.9. If your machine supports IPv6, you can also use those addresses. 
+/// If this parameter is not specified, the value of `host` will be looked up to find the corresponding IP address, +/// or if host specifies an IP address, that value will be used directly. +/// Using `hostaddr` allows the application to avoid a host name look-up, which might be important in applications +/// with time constraints. However, a host name is required for TLS certificate verification. +/// Specifically: +/// * If `hostaddr` is specified without `host`, the value for `hostaddr` gives the server network address. +/// The connection attempt will fail if the authentication method requires a host name; +/// * If `host` is specified without `hostaddr`, a host name lookup occurs; +/// * If both `host` and `hostaddr` are specified, the value for `hostaddr` gives the server network address. +/// The value for `host` is ignored unless the authentication method requires it, +/// in which case it will be used as the host name. /// * `port` - The port to connect to. Multiple ports can be specified, separated by commas. The number of ports must be /// either 1, in which case it will be used for all hosts, or the same as the number of hosts. Defaults to 5432 if /// omitted or the empty string. /// * `connect_timeout` - The time limit in seconds applied to each socket-level connection attempt. Note that hostnames /// can resolve to multiple IP addresses, and this limit is applied to each address. Defaults to no timeout. +/// * `tcp_user_timeout` - The time limit that transmitted data may remain unacknowledged before a connection is forcibly closed. +/// This is ignored for Unix domain socket connections. It is only supported on systems where TCP_USER_TIMEOUT is available +/// and will default to the system default if omitted or set to 0; on other systems, it has no effect. /// * `keepalives` - Controls the use of TCP keepalive. A value of 0 disables keepalive and nonzero integers enable it. /// This option is ignored when connecting with Unix sockets. 
Defaults to on. /// * `keepalives_idle` - The number of seconds of inactivity after which a keepalive message is sent to the server. /// This option is ignored when connecting with Unix sockets. Defaults to 2 hours. +/// * `keepalives_interval` - The time interval between TCP keepalive probes. +/// This option is ignored when connecting with Unix sockets. +/// * `keepalives_retries` - The maximum number of TCP keepalive probes that will be sent before dropping a connection. +/// This option is ignored when connecting with Unix sockets. /// * `target_session_attrs` - Specifies requirements of the session. If set to `read-write`, the client will check that /// the `transaction_read_write` session parameter is set to `on`. This can be used to connect to the primary server /// in a database cluster as opposed to the secondary read-only mirrors. Defaults to `all`. /// * `channel_binding` - Controls usage of channel binding in the authentication process. If set to `disable`, channel /// binding will not be used. If set to `prefer`, channel binding will be used if available, but not used otherwise. /// If set to `require`, the authentication process will fail if channel binding is not used. Defaults to `prefer`. +/// * `load_balance_hosts` - Controls the order in which the client tries to connect to the available hosts and +/// addresses. Once a connection attempt is successful no other hosts and addresses will be tried. This parameter +/// is typically used in combination with multiple host names or a DNS record that returns multiple IPs. If set to +/// `disable`, hosts and addresses will be tried in the order provided. If set to `random`, hosts will be tried +/// in a random order, and the IP addresses resolved from a hostname will also be tried in a random order. Defaults +/// to `disable`. 
/// /// ## Examples /// @@ -117,6 +184,10 @@ pub enum Host { /// ``` /// /// ```not_rust +/// host=host1,host2,host3 port=1234,,5678 hostaddr=127.0.0.1,127.0.0.2,127.0.0.3 user=postgres target_session_attrs=read-write +/// ``` +/// +/// ```not_rust /// host=host1,host2,host3 port=1234,,5678 user=postgres target_session_attrs=read-write /// ``` /// @@ -144,7 +215,7 @@ pub enum Host { /// ```not_rust /// postgresql:///mydb?user=user&host=/var/lib/postgresql /// ``` -#[derive(PartialEq, Clone)] +#[derive(Clone, PartialEq, Eq)] pub struct Config { pub(crate) user: Option, pub(crate) password: Option>, @@ -152,13 +223,18 @@ pub struct Config { pub(crate) options: Option, pub(crate) application_name: Option, pub(crate) ssl_mode: SslMode, + pub(crate) ssl_negotiation: SslNegotiation, pub(crate) host: Vec, + pub(crate) hostaddr: Vec, pub(crate) port: Vec, pub(crate) connect_timeout: Option, + pub(crate) tcp_user_timeout: Option, pub(crate) keepalives: bool, - pub(crate) keepalives_idle: Duration, + #[cfg(not(target_arch = "wasm32"))] + pub(crate) keepalive_config: KeepaliveConfig, pub(crate) target_session_attrs: TargetSessionAttrs, pub(crate) channel_binding: ChannelBinding, + pub(crate) load_balance_hosts: LoadBalanceHosts, } impl Default for Config { @@ -177,21 +253,30 @@ impl Config { options: None, application_name: None, ssl_mode: SslMode::Prefer, + ssl_negotiation: SslNegotiation::Postgres, host: vec![], + hostaddr: vec![], port: vec![], connect_timeout: None, + tcp_user_timeout: None, keepalives: true, - keepalives_idle: Duration::from_secs(2 * 60 * 60), + #[cfg(not(target_arch = "wasm32"))] + keepalive_config: KeepaliveConfig { + idle: Duration::from_secs(2 * 60 * 60), + interval: None, + retries: None, + }, target_session_attrs: TargetSessionAttrs::Any, channel_binding: ChannelBinding::Prefer, + load_balance_hosts: LoadBalanceHosts::Disable, } } /// Sets the user to authenticate with. /// - /// Required. 
- pub fn user(&mut self, user: &str) -> &mut Config { - self.user = Some(user.to_string()); + /// Defaults to the user executing this process. + pub fn user(&mut self, user: impl Into) -> &mut Config { + self.user = Some(user.into()); self } @@ -219,8 +304,8 @@ impl Config { /// Sets the name of the database to connect to. /// /// Defaults to the user. - pub fn dbname(&mut self, dbname: &str) -> &mut Config { - self.dbname = Some(dbname.to_string()); + pub fn dbname(&mut self, dbname: impl Into) -> &mut Config { + self.dbname = Some(dbname.into()); self } @@ -231,8 +316,8 @@ impl Config { } /// Sets command line options used to configure the server. - pub fn options(&mut self, options: &str) -> &mut Config { - self.options = Some(options.to_string()); + pub fn options(&mut self, options: impl Into) -> &mut Config { + self.options = Some(options.into()); self } @@ -243,8 +328,8 @@ impl Config { } /// Sets the value of the `application_name` runtime parameter. - pub fn application_name(&mut self, application_name: &str) -> &mut Config { - self.application_name = Some(application_name.to_string()); + pub fn application_name(&mut self, application_name: impl Into) -> &mut Config { + self.application_name = Some(application_name.into()); self } @@ -267,11 +352,27 @@ impl Config { self.ssl_mode } + /// Sets the SSL negotiation method. + /// + /// Defaults to `postgres`. + pub fn ssl_negotiation(&mut self, ssl_negotiation: SslNegotiation) -> &mut Config { + self.ssl_negotiation = ssl_negotiation; + self + } + + /// Gets the SSL negotiation method. + pub fn get_ssl_negotiation(&self) -> SslNegotiation { + self.ssl_negotiation + } + /// Adds a host to the configuration. /// /// Multiple hosts can be specified by calling this method multiple times, and each will be tried in order. On Unix /// systems, a host starting with a `/` is interpreted as a path to a directory containing Unix domain sockets. 
- pub fn host(&mut self, host: &str) -> &mut Config { + /// There must be either no hosts, or the same number of hosts as hostaddrs. + pub fn host(&mut self, host: impl Into) -> &mut Config { + let host = host.into(); + #[cfg(unix)] { if host.starts_with('/') { @@ -279,7 +380,7 @@ impl Config { } } - self.host.push(Host::Tcp(host.to_string())); + self.host.push(Host::Tcp(host)); self } @@ -288,6 +389,11 @@ impl Config { &self.host } + /// Gets the hostaddrs that have been added to the configuration with `hostaddr`. + pub fn get_hostaddrs(&self) -> &[IpAddr] { + self.hostaddr.deref() + } + /// Adds a Unix socket host to the configuration. /// /// Unlike `host`, this method allows non-UTF8 paths. @@ -300,6 +406,15 @@ impl Config { self } + /// Adds a hostaddr to the configuration. + /// + /// Multiple hostaddrs can be specified by calling this method multiple times, and each will be tried in order. + /// There must be either no hostaddrs, or the same number of hostaddrs as hosts. + pub fn hostaddr(&mut self, hostaddr: IpAddr) -> &mut Config { + self.hostaddr.push(hostaddr); + self + } + /// Adds a port to the configuration. /// /// Multiple ports can be specified by calling this method multiple times. There must either be no ports, in which @@ -330,6 +445,22 @@ impl Config { self.connect_timeout.as_ref() } + /// Sets the TCP user timeout. + /// + /// This is ignored for Unix domain socket connections. It is only supported on systems where + /// TCP_USER_TIMEOUT is available and will default to the system default if omitted or set to 0; + /// on other systems, it has no effect. + pub fn tcp_user_timeout(&mut self, tcp_user_timeout: Duration) -> &mut Config { + self.tcp_user_timeout = Some(tcp_user_timeout); + self + } + + /// Gets the TCP user timeout, if one has been set with the + /// `user_timeout` method. + pub fn get_tcp_user_timeout(&self) -> Option<&Duration> { + self.tcp_user_timeout.as_ref() + } + /// Controls the use of TCP keepalive. 
/// /// This is ignored for Unix domain socket connections. Defaults to `true`. @@ -346,15 +477,48 @@ impl Config { /// Sets the amount of idle time before a keepalive packet is sent on the connection. /// /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. Defaults to 2 hours. + #[cfg(not(target_arch = "wasm32"))] pub fn keepalives_idle(&mut self, keepalives_idle: Duration) -> &mut Config { - self.keepalives_idle = keepalives_idle; + self.keepalive_config.idle = keepalives_idle; self } /// Gets the configured amount of idle time before a keepalive packet will /// be sent on the connection. + #[cfg(not(target_arch = "wasm32"))] pub fn get_keepalives_idle(&self) -> Duration { - self.keepalives_idle + self.keepalive_config.idle + } + + /// Sets the time interval between TCP keepalive probes. + /// On Windows, this sets the value of the tcp_keepalive struct’s keepaliveinterval field. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + #[cfg(not(target_arch = "wasm32"))] + pub fn keepalives_interval(&mut self, keepalives_interval: Duration) -> &mut Config { + self.keepalive_config.interval = Some(keepalives_interval); + self + } + + /// Gets the time interval between TCP keepalive probes. + #[cfg(not(target_arch = "wasm32"))] + pub fn get_keepalives_interval(&self) -> Option { + self.keepalive_config.interval + } + + /// Sets the maximum number of TCP keepalive probes that will be sent before dropping a connection. + /// + /// This is ignored for Unix domain sockets, or if the `keepalives` option is disabled. + #[cfg(not(target_arch = "wasm32"))] + pub fn keepalives_retries(&mut self, keepalives_retries: u32) -> &mut Config { + self.keepalive_config.retries = Some(keepalives_retries); + self + } + + /// Gets the maximum number of TCP keepalive probes that will be sent before dropping a connection. 
+ #[cfg(not(target_arch = "wasm32"))] + pub fn get_keepalives_retries(&self) -> Option { + self.keepalive_config.retries } /// Sets the requirements of the session. @@ -387,22 +551,35 @@ impl Config { self.channel_binding } + /// Sets the host load balancing behavior. + /// + /// Defaults to `disable`. + pub fn load_balance_hosts(&mut self, load_balance_hosts: LoadBalanceHosts) -> &mut Config { + self.load_balance_hosts = load_balance_hosts; + self + } + + /// Gets the host load balancing behavior. + pub fn get_load_balance_hosts(&self) -> LoadBalanceHosts { + self.load_balance_hosts + } + fn param(&mut self, key: &str, value: &str) -> Result<(), Error> { match key { "user" => { - self.user(&value); + self.user(value); } "password" => { self.password(value); } "dbname" => { - self.dbname(&value); + self.dbname(value); } "options" => { - self.options(&value); + self.options(value); } "application_name" => { - self.application_name(&value); + self.application_name(value); } "sslmode" => { let mode = match value { @@ -413,11 +590,31 @@ impl Config { }; self.ssl_mode(mode); } + "sslnegotiation" => { + let mode = match value { + "postgres" => SslNegotiation::Postgres, + "direct" => SslNegotiation::Direct, + _ => { + return Err(Error::config_parse(Box::new(InvalidValue( + "sslnegotiation", + )))) + } + }; + self.ssl_negotiation(mode); + } "host" => { for host in value.split(',') { self.host(host); } } + "hostaddr" => { + for hostaddr in value.split(',') { + let addr = hostaddr + .parse() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("hostaddr"))))?; + self.hostaddr(addr); + } + } "port" => { for port in value.split(',') { let port = if port.is_empty() { @@ -437,12 +634,22 @@ impl Config { self.connect_timeout(Duration::from_secs(timeout as u64)); } } + "tcp_user_timeout" => { + let timeout = value + .parse::() + .map_err(|_| Error::config_parse(Box::new(InvalidValue("tcp_user_timeout"))))?; + if timeout > 0 { + self.tcp_user_timeout(Duration::from_secs(timeout 
as u64)); + } + } + #[cfg(not(target_arch = "wasm32"))] "keepalives" => { let keepalives = value .parse::() .map_err(|_| Error::config_parse(Box::new(InvalidValue("keepalives"))))?; self.keepalives(keepalives != 0); } + #[cfg(not(target_arch = "wasm32"))] "keepalives_idle" => { let keepalives_idle = value .parse::() @@ -451,10 +658,27 @@ impl Config { self.keepalives_idle(Duration::from_secs(keepalives_idle as u64)); } } + #[cfg(not(target_arch = "wasm32"))] + "keepalives_interval" => { + let keepalives_interval = value.parse::().map_err(|_| { + Error::config_parse(Box::new(InvalidValue("keepalives_interval"))) + })?; + if keepalives_interval > 0 { + self.keepalives_interval(Duration::from_secs(keepalives_interval as u64)); + } + } + #[cfg(not(target_arch = "wasm32"))] + "keepalives_retries" => { + let keepalives_retries = value.parse::().map_err(|_| { + Error::config_parse(Box::new(InvalidValue("keepalives_retries"))) + })?; + self.keepalives_retries(keepalives_retries); + } "target_session_attrs" => { - let target_session_attrs = match &*value { + let target_session_attrs = match value { "any" => TargetSessionAttrs::Any, "read-write" => TargetSessionAttrs::ReadWrite, + "read-only" => TargetSessionAttrs::ReadOnly, _ => { return Err(Error::config_parse(Box::new(InvalidValue( "target_session_attrs", @@ -476,6 +700,18 @@ impl Config { }; self.channel_binding(channel_binding); } + "load_balance_hosts" => { + let load_balance_hosts = match value { + "disable" => LoadBalanceHosts::Disable, + "random" => LoadBalanceHosts::Random, + _ => { + return Err(Error::config_parse(Box::new(InvalidValue( + "load_balance_hosts", + )))) + } + }; + self.load_balance_hosts(load_balance_hosts); + } key => { return Err(Error::config_parse(Box::new(UnknownOption( key.to_string(), @@ -509,7 +745,7 @@ impl Config { S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - connect_raw(stream, tls, self).await + connect_raw(stream, tls, true, self).await } } @@ -534,7 +770,8 @@ impl fmt::Debug 
for Config { } } - f.debug_struct("Config") + let mut config_dbg = &mut f.debug_struct("Config"); + config_dbg = config_dbg .field("user", &self.user) .field("password", &self.password.as_ref().map(|_| Redaction {})) .field("dbname", &self.dbname) @@ -542,12 +779,24 @@ impl fmt::Debug for Config { .field("application_name", &self.application_name) .field("ssl_mode", &self.ssl_mode) .field("host", &self.host) + .field("hostaddr", &self.hostaddr) .field("port", &self.port) .field("connect_timeout", &self.connect_timeout) - .field("keepalives", &self.keepalives) - .field("keepalives_idle", &self.keepalives_idle) + .field("tcp_user_timeout", &self.tcp_user_timeout) + .field("keepalives", &self.keepalives); + + #[cfg(not(target_arch = "wasm32"))] + { + config_dbg = config_dbg + .field("keepalives_idle", &self.keepalive_config.idle) + .field("keepalives_interval", &self.keepalive_config.interval) + .field("keepalives_retries", &self.keepalive_config.retries); + } + + config_dbg .field("target_session_attrs", &self.target_session_attrs) .field("channel_binding", &self.channel_binding) + .field("load_balance_hosts", &self.load_balance_hosts) .finish() } } @@ -760,8 +1009,8 @@ impl<'a> UrlParser<'a> { fn remove_url_prefix(s: &str) -> Option<&str> { for prefix in &["postgres://", "postgresql://"] { - if s.starts_with(prefix) { - return Some(&s[prefix.len()..]); + if let Some(stripped) = s.strip_prefix(prefix) { + return Some(stripped); } } @@ -780,7 +1029,7 @@ impl<'a> UrlParser<'a> { } fn take_all(&mut self) -> &'a str { - mem::replace(&mut self.s, "") + mem::take(&mut self.s) } fn eat_byte(&mut self) { @@ -796,7 +1045,7 @@ impl<'a> UrlParser<'a> { let mut it = creds.splitn(2, ':'); let user = self.decode(it.next().unwrap())?; - self.config.user(&user); + self.config.user(user); if let Some(password) = it.next() { let password = Cow::from(percent_encoding::percent_decode(password.as_bytes())); @@ -825,8 +1074,8 @@ impl<'a> UrlParser<'a> { let host = &chunk[1..idx]; let 
remaining = &chunk[idx + 1..]; - let port = if remaining.starts_with(':') { - Some(&remaining[1..]) + let port = if let Some(port) = remaining.strip_prefix(':') { + Some(port) } else if remaining.is_empty() { None } else { @@ -859,7 +1108,7 @@ impl<'a> UrlParser<'a> { }; if !dbname.is_empty() { - self.config.dbname(&self.decode(dbname)?); + self.config.dbname(self.decode(dbname)?); } Ok(()) @@ -900,7 +1149,7 @@ impl<'a> UrlParser<'a> { #[cfg(unix)] fn host_param(&mut self, s: &str) -> Result<(), Error> { let decoded = Cow::from(percent_encoding::percent_decode(s.as_bytes())); - if decoded.get(0) == Some(&b'/') { + if decoded.first() == Some(&b'/') { self.config.host_path(OsStr::from_bytes(&decoded)); } else { let decoded = str::from_utf8(&decoded).map_err(|e| Error::config_parse(Box::new(e)))?; @@ -922,3 +1171,41 @@ impl<'a> UrlParser<'a> { .map_err(|e| Error::config_parse(e.into())) } } + +#[cfg(test)] +mod tests { + use std::net::IpAddr; + + use crate::{config::Host, Config}; + + #[test] + fn test_simple_parsing() { + let s = "user=pass_user dbname=postgres host=host1,host2 hostaddr=127.0.0.1,127.0.0.2 port=26257"; + let config = s.parse::().unwrap(); + assert_eq!(Some("pass_user"), config.get_user()); + assert_eq!(Some("postgres"), config.get_dbname()); + assert_eq!( + [ + Host::Tcp("host1".to_string()), + Host::Tcp("host2".to_string()) + ], + config.get_hosts(), + ); + + assert_eq!( + [ + "127.0.0.1".parse::().unwrap(), + "127.0.0.2".parse::().unwrap() + ], + config.get_hostaddrs(), + ); + + assert_eq!(1, 1); + } + + #[test] + fn test_invalid_hostaddr_parsing() { + let s = "user=pass_user dbname=postgres host=host1 hostaddr=127.0.0 port=26257"; + s.parse::().err().unwrap(); + } +} diff --git a/tokio-postgres/src/connect.rs b/tokio-postgres/src/connect.rs index db2ddc85f..e97a7a2a3 100644 --- a/tokio-postgres/src/connect.rs +++ b/tokio-postgres/src/connect.rs @@ -1,12 +1,14 @@ -use crate::client::SocketConfig; -use crate::config::{Host, TargetSessionAttrs}; +use 
crate::client::{Addr, SocketConfig}; +use crate::config::{Host, LoadBalanceHosts, TargetSessionAttrs}; use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; -use crate::tls::{MakeTlsConnect, TlsConnect}; +use crate::tls::MakeTlsConnect; use crate::{Client, Config, Connection, Error, SimpleQueryMessage, Socket}; -use futures::{future, pin_mut, Future, FutureExt, Stream}; -use std::io; +use futures_util::{future, pin_mut, Future, FutureExt, Stream}; +use rand::seq::SliceRandom; use std::task::Poll; +use std::{cmp, io}; +use tokio::net; pub async fn connect( mut tls: T, @@ -15,34 +17,64 @@ pub async fn connect( where T: MakeTlsConnect, { - if config.host.is_empty() { - return Err(Error::config("host missing".into())); + if config.host.is_empty() && config.hostaddr.is_empty() { + return Err(Error::config("both host and hostaddr are missing".into())); } - if config.port.len() > 1 && config.port.len() != config.host.len() { + if !config.host.is_empty() + && !config.hostaddr.is_empty() + && config.host.len() != config.hostaddr.len() + { + let msg = format!( + "number of hosts ({}) is different from number of hostaddrs ({})", + config.host.len(), + config.hostaddr.len(), + ); + return Err(Error::config(msg.into())); + } + + // At this point, either one of the following two scenarios could happen: + // (1) either config.host or config.hostaddr must be empty; + // (2) if both config.host and config.hostaddr are NOT empty; their lengths must be equal. 
+ let num_hosts = cmp::max(config.host.len(), config.hostaddr.len()); + + if config.port.len() > 1 && config.port.len() != num_hosts { return Err(Error::config("invalid number of ports".into())); } + let mut indices = (0..num_hosts).collect::>(); + if config.load_balance_hosts == LoadBalanceHosts::Random { + indices.shuffle(&mut rand::rng()); + } + let mut error = None; - for (i, host) in config.host.iter().enumerate() { - let port = *config + for i in indices { + let host = config.host.get(i); + let hostaddr = config.hostaddr.get(i); + let port = config .port .get(i) - .or_else(|| config.port.get(0)) - .unwrap_or(&5432); + .or_else(|| config.port.first()) + .copied() + .unwrap_or(5432); + // The value of host is used as the hostname for TLS validation, let hostname = match host { - Host::Tcp(host) => &**host, + Some(Host::Tcp(host)) => Some(host.clone()), // postgres doesn't support TLS over unix sockets, so the choice here doesn't matter #[cfg(unix)] - Host::Unix(_) => "", + Some(Host::Unix(_)) => None, + None => None, }; - let tls = tls - .make_tls_connect(hostname) - .map_err(|e| Error::tls(e.into()))?; + // Try to use the value of hostaddr to establish the TCP connection, + // fallback to host if hostaddr is not present. + let addr = match hostaddr { + Some(ipaddr) => Host::Tcp(ipaddr.to_string()), + None => host.cloned().unwrap(), + }; - match connect_once(host, port, tls, config).await { + match connect_host(addr, hostname, port, &mut tls, config).await { Ok((client, connection)) => return Ok((client, connection)), Err(e) => error = Some(e), } @@ -51,26 +83,84 @@ where Err(error.unwrap()) } +async fn connect_host( + host: Host, + hostname: Option, + port: u16, + tls: &mut T, + config: &Config, +) -> Result<(Client, Connection), Error> +where + T: MakeTlsConnect, +{ + match host { + Host::Tcp(host) => { + let mut addrs = net::lookup_host((&*host, port)) + .await + .map_err(Error::connect)? 
+ .collect::>(); + + if config.load_balance_hosts == LoadBalanceHosts::Random { + addrs.shuffle(&mut rand::rng()); + } + + let mut last_err = None; + for addr in addrs { + match connect_once(Addr::Tcp(addr.ip()), hostname.as_deref(), port, tls, config) + .await + { + Ok(stream) => return Ok(stream), + Err(e) => { + last_err = Some(e); + continue; + } + }; + } + + Err(last_err.unwrap_or_else(|| { + Error::connect(io::Error::new( + io::ErrorKind::InvalidInput, + "could not resolve any addresses", + )) + })) + } + #[cfg(unix)] + Host::Unix(path) => { + connect_once(Addr::Unix(path), hostname.as_deref(), port, tls, config).await + } + } +} + async fn connect_once( - host: &Host, + addr: Addr, + hostname: Option<&str>, port: u16, - tls: T, + tls: &mut T, config: &Config, ) -> Result<(Client, Connection), Error> where - T: TlsConnect, + T: MakeTlsConnect, { let socket = connect_socket( - host, + &addr, port, config.connect_timeout, - config.keepalives, - config.keepalives_idle, + config.tcp_user_timeout, + if config.keepalives { + Some(&config.keepalive_config) + } else { + None + }, ) .await?; - let (mut client, mut connection) = connect_raw(socket, tls, config).await?; - if let TargetSessionAttrs::ReadWrite = config.target_session_attrs { + let tls = tls + .make_tls_connect(hostname.unwrap_or("")) + .map_err(|e| Error::tls(e.into()))?; + let has_hostname = hostname.is_some(); + let (mut client, mut connection) = connect_raw(socket, tls, has_hostname, config).await?; + + if config.target_session_attrs != TargetSessionAttrs::Any { let rows = client.simple_query_raw("SHOW transaction_read_only"); pin_mut!(rows); @@ -95,11 +185,21 @@ where match next.await.transpose()? { Some(SimpleQueryMessage::Row(row)) => { - if row.try_get(0)? 
== Some("on") { + let read_only_result = row.try_get(0)?; + if read_only_result == Some("on") + && config.target_session_attrs == TargetSessionAttrs::ReadWrite + { return Err(Error::connect(io::Error::new( io::ErrorKind::PermissionDenied, "database does not allow writes", ))); + } else if read_only_result == Some("off") + && config.target_session_attrs == TargetSessionAttrs::ReadOnly + { + return Err(Error::connect(io::Error::new( + io::ErrorKind::PermissionDenied, + "database is not read only", + ))); } else { break; } @@ -111,11 +211,16 @@ where } client.set_socket_config(SocketConfig { - host: host.clone(), + addr, + hostname: hostname.map(|s| s.to_string()), port, connect_timeout: config.connect_timeout, - keepalives: config.keepalives, - keepalives_idle: config.keepalives_idle, + tcp_user_timeout: config.tcp_user_timeout, + keepalive: if config.keepalives { + Some(config.keepalive_config.clone()) + } else { + None + }, }); Ok((client, connection)) diff --git a/tokio-postgres/src/connect_raw.rs b/tokio-postgres/src/connect_raw.rs index d07d5a2df..cf7476cab 100644 --- a/tokio-postgres/src/connect_raw.rs +++ b/tokio-postgres/src/connect_raw.rs @@ -6,13 +6,14 @@ use crate::tls::{TlsConnect, TlsStream}; use crate::{Client, Connection, Error}; use bytes::BytesMut; use fallible_iterator::FallibleIterator; -use futures::channel::mpsc; -use futures::{ready, Sink, SinkExt, Stream, TryStreamExt}; +use futures_channel::mpsc; +use futures_util::{ready, Sink, SinkExt, Stream, TryStreamExt}; use postgres_protocol::authentication; use postgres_protocol::authentication::sasl; use postgres_protocol::authentication::sasl::ScramSha256; use postgres_protocol::message::backend::{AuthenticationSaslBody, Message}; use postgres_protocol::message::frontend; +use std::borrow::Cow; use std::collections::{HashMap, VecDeque}; use std::io; use std::pin::Pin; @@ -81,13 +82,21 @@ where pub async fn connect_raw( stream: S, tls: T, + has_hostname: bool, config: &Config, ) -> Result<(Client, 
Connection), Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - let stream = connect_tls(stream, config.ssl_mode, tls).await?; + let stream = connect_tls( + stream, + config.ssl_mode, + config.ssl_negotiation, + tls, + has_hostname, + ) + .await?; let mut stream = StartupStream { inner: Framed::new(stream, PostgresCodec), @@ -95,26 +104,39 @@ where delayed: VecDeque::new(), }; - startup(&mut stream, config).await?; - authenticate(&mut stream, config).await?; + let user = config + .user + .as_deref() + .map_or_else(|| Cow::Owned(whoami::username()), Cow::Borrowed); + + startup(&mut stream, config, &user).await?; + authenticate(&mut stream, config, &user).await?; let (process_id, secret_key, parameters) = read_info(&mut stream).await?; let (sender, receiver) = mpsc::unbounded(); - let client = Client::new(sender, config.ssl_mode, process_id, secret_key); + let client = Client::new( + sender, + config.ssl_mode, + config.ssl_negotiation, + process_id, + secret_key, + ); let connection = Connection::new(stream.inner, stream.delayed, parameters, receiver); Ok((client, connection)) } -async fn startup(stream: &mut StartupStream, config: &Config) -> Result<(), Error> +async fn startup( + stream: &mut StartupStream, + config: &Config, + user: &str, +) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin, { - let mut params = vec![("client_encoding", "UTF8"), ("timezone", "UTC")]; - if let Some(user) = &config.user { - params.push(("user", &**user)); - } + let mut params = vec![("client_encoding", "UTF8")]; + params.push(("user", user)); if let Some(dbname) = &config.dbname { params.push(("database", &**dbname)); } @@ -134,7 +156,11 @@ where .map_err(Error::io) } -async fn authenticate(stream: &mut StartupStream, config: &Config) -> Result<(), Error> +async fn authenticate( + stream: &mut StartupStream, + config: &Config, + user: &str, +) -> Result<(), Error> where S: AsyncRead + AsyncWrite + Unpin, T: TlsStream + 
Unpin, @@ -157,10 +183,6 @@ where Some(Message::AuthenticationMd5Password(body)) => { can_skip_channel_binding(config)?; - let user = config - .user - .as_ref() - .ok_or_else(|| Error::config("user missing".into()))?; let pass = config .password .as_ref() diff --git a/tokio-postgres/src/connect_socket.rs b/tokio-postgres/src/connect_socket.rs index 2d56a2ed5..26184701f 100644 --- a/tokio-postgres/src/connect_socket.rs +++ b/tokio-postgres/src/connect_socket.rs @@ -1,5 +1,7 @@ -use crate::config::Host; +use crate::client::Addr; +use crate::keepalive::KeepaliveConfig; use crate::{Error, Socket}; +use socket2::{SockRef, TcpKeepalive}; use std::future::Future; use std::io; use std::time::Duration; @@ -9,28 +11,41 @@ use tokio::net::UnixStream; use tokio::time; pub(crate) async fn connect_socket( - host: &Host, + addr: &Addr, port: u16, connect_timeout: Option, - keepalives: bool, - keepalives_idle: Duration, + #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] tcp_user_timeout: Option< + Duration, + >, + keepalive_config: Option<&KeepaliveConfig>, ) -> Result { - match host { - Host::Tcp(host) => { - let socket = - connect_with_timeout(TcpStream::connect((&**host, port)), connect_timeout).await?; - socket.set_nodelay(true).map_err(Error::connect)?; - if keepalives { - socket - .set_keepalive(Some(keepalives_idle)) + match addr { + Addr::Tcp(ip) => { + let stream = + connect_with_timeout(TcpStream::connect((*ip, port)), connect_timeout).await?; + + stream.set_nodelay(true).map_err(Error::connect)?; + + let sock_ref = SockRef::from(&stream); + + #[cfg(target_os = "linux")] + if let Some(tcp_user_timeout) = tcp_user_timeout { + sock_ref + .set_tcp_user_timeout(Some(tcp_user_timeout)) + .map_err(Error::connect)?; + } + + if let Some(keepalive_config) = keepalive_config { + sock_ref + .set_tcp_keepalive(&TcpKeepalive::from(keepalive_config)) .map_err(Error::connect)?; } - Ok(Socket::new_tcp(socket)) + Ok(Socket::new_tcp(stream)) } #[cfg(unix)] - Host::Unix(path) 
=> { - let path = path.join(format!(".s.PGSQL.{}", port)); + Addr::Unix(dir) => { + let path = dir.join(format!(".s.PGSQL.{}", port)); let socket = connect_with_timeout(UnixStream::connect(path), connect_timeout).await?; Ok(Socket::new_unix(socket)) } diff --git a/tokio-postgres/src/connect_tls.rs b/tokio-postgres/src/connect_tls.rs index 5ef21ac5c..d220cd3b5 100644 --- a/tokio-postgres/src/connect_tls.rs +++ b/tokio-postgres/src/connect_tls.rs @@ -1,4 +1,4 @@ -use crate::config::SslMode; +use crate::config::{SslMode, SslNegotiation}; use crate::maybe_tls_stream::MaybeTlsStream; use crate::tls::private::ForcePrivateApi; use crate::tls::TlsConnect; @@ -10,7 +10,9 @@ use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; pub async fn connect_tls( mut stream: S, mode: SslMode, + negotiation: SslNegotiation, tls: T, + has_hostname: bool, ) -> Result, Error> where S: AsyncRead + AsyncWrite + Unpin, @@ -21,24 +23,33 @@ where SslMode::Prefer if !tls.can_connect(ForcePrivateApi) => { return Ok(MaybeTlsStream::Raw(stream)) } + SslMode::Prefer if negotiation == SslNegotiation::Direct => { + return Err(Error::tls("weak sslmode \"prefer\" may not be used with sslnegotiation=direct (use \"require\", \"verify-ca\", or \"verify-full\")".into())) + } SslMode::Prefer | SslMode::Require => {} } - let mut buf = BytesMut::new(); - frontend::ssl_request(&mut buf); - stream.write_all(&buf).await.map_err(Error::io)?; + if negotiation == SslNegotiation::Postgres { + let mut buf = BytesMut::new(); + frontend::ssl_request(&mut buf); + stream.write_all(&buf).await.map_err(Error::io)?; - let mut buf = [0]; - stream.read_exact(&mut buf).await.map_err(Error::io)?; + let mut buf = [0]; + stream.read_exact(&mut buf).await.map_err(Error::io)?; - if buf[0] != b'S' { - if SslMode::Require == mode { - return Err(Error::tls("server does not support TLS".into())); - } else { - return Ok(MaybeTlsStream::Raw(stream)); + if buf[0] != b'S' { + if SslMode::Require == mode { + return 
Err(Error::tls("server does not support TLS".into())); + } else { + return Ok(MaybeTlsStream::Raw(stream)); + } } } + if !has_hostname { + return Err(Error::tls("no hostname provided for TLS handshake".into())); + } + let stream = tls .connect(stream) .await diff --git a/tokio-postgres/src/connection.rs b/tokio-postgres/src/connection.rs index 9c8e369f1..414335955 100644 --- a/tokio-postgres/src/connection.rs +++ b/tokio-postgres/src/connection.rs @@ -5,9 +5,8 @@ use crate::maybe_tls_stream::MaybeTlsStream; use crate::{AsyncMessage, Error, Notification}; use bytes::BytesMut; use fallible_iterator::FallibleIterator; -use futures::channel::mpsc; -use futures::stream::FusedStream; -use futures::{ready, Sink, Stream, StreamExt}; +use futures_channel::mpsc; +use futures_util::{ready, stream::FusedStream, Sink, Stream, StreamExt}; use log::{info, trace}; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -200,9 +199,10 @@ where return Ok(false); } - if let Poll::Pending = Pin::new(&mut self.stream) + if Pin::new(&mut self.stream) .poll_ready(cx) .map_err(Error::io)? + .is_pending() { trace!("poll_write: waiting on socket"); return Ok(false); @@ -302,6 +302,9 @@ where /// /// The server can send notices as well as notifications asynchronously to the client. Applications that wish to /// examine those messages should use this method to drive the connection rather than its `Future` implementation. + /// + /// Return values of `None` or `Some(Err(_))` are "terminal"; callers should not invoke this method again after + /// receiving one of those values. 
pub fn poll_message( &mut self, cx: &mut Context<'_>, diff --git a/tokio-postgres/src/copy_in.rs b/tokio-postgres/src/copy_in.rs index fc712f6db..59e31fea6 100644 --- a/tokio-postgres/src/copy_in.rs +++ b/tokio-postgres/src/copy_in.rs @@ -1,12 +1,11 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; +use crate::query::extract_row_affected; use crate::{query, slice_iter, Error, Statement}; -use bytes::buf::BufExt; use bytes::{Buf, BufMut, BytesMut}; -use futures::channel::mpsc; -use futures::future; -use futures::{ready, Sink, SinkExt, Stream, StreamExt}; +use futures_channel::mpsc; +use futures_util::{future, ready, Sink, SinkExt, Stream, StreamExt}; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; @@ -112,14 +111,7 @@ where let this = self.as_mut().project(); match ready!(this.responses.poll_next(cx))? { Message::CommandComplete(body) => { - let rows = body - .tag() - .map_err(Error::parse)? 
- .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); + let rows = extract_row_affected(&body)?; return Poll::Ready(Ok(rows)); } _ => return Poll::Ready(Err(Error::unexpected_message())), diff --git a/tokio-postgres/src/copy_out.rs b/tokio-postgres/src/copy_out.rs index 52691b963..1e6949252 100644 --- a/tokio-postgres/src/copy_out.rs +++ b/tokio-postgres/src/copy_out.rs @@ -3,7 +3,7 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::{query, slice_iter, Error, Statement}; use bytes::Bytes; -use futures::{ready, Stream}; +use futures_util::{ready, Stream}; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; diff --git a/tokio-postgres/src/error/mod.rs b/tokio-postgres/src/error/mod.rs index 788e70cf4..75664d258 100644 --- a/tokio-postgres/src/error/mod.rs +++ b/tokio-postgres/src/error/mod.rs @@ -107,14 +107,15 @@ impl DbError { let mut routine = None; while let Some(field) = fields.next()? { + let value = String::from_utf8_lossy(field.value_bytes()); match field.type_() { - b'S' => severity = Some(field.value().to_owned()), - b'C' => code = Some(SqlState::from_code(field.value())), - b'M' => message = Some(field.value().to_owned()), - b'D' => detail = Some(field.value().to_owned()), - b'H' => hint = Some(field.value().to_owned()), + b'S' => severity = Some(value.into_owned()), + b'C' => code = Some(SqlState::from_code(&value)), + b'M' => message = Some(value.into_owned()), + b'D' => detail = Some(value.into_owned()), + b'H' => hint = Some(value.into_owned()), b'P' => { - normal_position = Some(field.value().parse::().map_err(|_| { + normal_position = Some(value.parse::().map_err(|_| { io::Error::new( io::ErrorKind::InvalidInput, "`P` field did not contain an integer", @@ -122,32 +123,32 @@ impl DbError { })?); } b'p' => { - internal_position = Some(field.value().parse::().map_err(|_| { + internal_position = Some(value.parse::().map_err(|_| { io::Error::new( 
io::ErrorKind::InvalidInput, "`p` field did not contain an integer", ) })?); } - b'q' => internal_query = Some(field.value().to_owned()), - b'W' => where_ = Some(field.value().to_owned()), - b's' => schema = Some(field.value().to_owned()), - b't' => table = Some(field.value().to_owned()), - b'c' => column = Some(field.value().to_owned()), - b'd' => datatype = Some(field.value().to_owned()), - b'n' => constraint = Some(field.value().to_owned()), - b'F' => file = Some(field.value().to_owned()), + b'q' => internal_query = Some(value.into_owned()), + b'W' => where_ = Some(value.into_owned()), + b's' => schema = Some(value.into_owned()), + b't' => table = Some(value.into_owned()), + b'c' => column = Some(value.into_owned()), + b'd' => datatype = Some(value.into_owned()), + b'n' => constraint = Some(value.into_owned()), + b'F' => file = Some(value.into_owned()), b'L' => { - line = Some(field.value().parse::().map_err(|_| { + line = Some(value.parse::().map_err(|_| { io::Error::new( io::ErrorKind::InvalidInput, "`L` field did not contain an integer", ) })?); } - b'R' => routine = Some(field.value().to_owned()), + b'R' => routine = Some(value.into_owned()), b'V' => { - parsed_severity = Some(Severity::from_str(field.value()).ok_or_else(|| { + parsed_severity = Some(Severity::from_str(&value).ok_or_else(|| { io::Error::new( io::ErrorKind::InvalidInput, "`V` field contained an invalid value", @@ -224,7 +225,7 @@ impl DbError { /// /// Might run to multiple lines. pub fn detail(&self) -> Option<&str> { - self.detail.as_ref().map(|s| &**s) + self.detail.as_deref() } /// An optional suggestion what to do about the problem. @@ -233,7 +234,7 @@ impl DbError { /// (potentially inappropriate) rather than hard facts. Might run to /// multiple lines. 
pub fn hint(&self) -> Option<&str> { - self.hint.as_ref().map(|s| &**s) + self.hint.as_deref() } /// An optional error cursor position into either the original query string @@ -248,20 +249,20 @@ impl DbError { /// language functions and internally-generated queries. The trace is one /// entry per line, most recent first. pub fn where_(&self) -> Option<&str> { - self.where_.as_ref().map(|s| &**s) + self.where_.as_deref() } /// If the error was associated with a specific database object, the name /// of the schema containing that object, if any. (PostgreSQL 9.3+) pub fn schema(&self) -> Option<&str> { - self.schema.as_ref().map(|s| &**s) + self.schema.as_deref() } /// If the error was associated with a specific table, the name of the /// table. (Refer to the schema name field for the name of the table's /// schema.) (PostgreSQL 9.3+) pub fn table(&self) -> Option<&str> { - self.table.as_ref().map(|s| &**s) + self.table.as_deref() } /// If the error was associated with a specific table column, the name of @@ -270,14 +271,14 @@ impl DbError { /// (Refer to the schema and table name fields to identify the table.) /// (PostgreSQL 9.3+) pub fn column(&self) -> Option<&str> { - self.column.as_ref().map(|s| &**s) + self.column.as_deref() } /// If the error was associated with a specific data type, the name of the /// data type. (Refer to the schema name field for the name of the data /// type's schema.) (PostgreSQL 9.3+) pub fn datatype(&self) -> Option<&str> { - self.datatype.as_ref().map(|s| &**s) + self.datatype.as_deref() } /// If the error was associated with a specific constraint, the name of the @@ -287,12 +288,12 @@ impl DbError { /// (For this purpose, indexes are treated as constraints, even if they /// weren't created with constraint syntax.) (PostgreSQL 9.3+) pub fn constraint(&self) -> Option<&str> { - self.constraint.as_ref().map(|s| &**s) + self.constraint.as_deref() } /// The file name of the source-code location where the error was reported. 
pub fn file(&self) -> Option<&str> { - self.file.as_ref().map(|s| &**s) + self.file.as_deref() } /// The line number of the source-code location where the error was @@ -303,13 +304,20 @@ impl DbError { /// The name of the source-code routine reporting the error. pub fn routine(&self) -> Option<&str> { - self.routine.as_ref().map(|s| &**s) + self.routine.as_deref() } } impl fmt::Display for DbError { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "{}: {}", self.severity, self.message) + write!(fmt, "{}: {}", self.severity, self.message)?; + if let Some(detail) = &self.detail { + write!(fmt, "\nDETAIL: {}", detail)?; + } + if let Some(hint) = &self.hint { + write!(fmt, "\nHINT: {}", hint)?; + } + Ok(()) } } @@ -337,6 +345,7 @@ enum Kind { ToSql(usize), FromSql(usize), Column(String), + Parameters(usize, usize), Closed, Db, Parse, @@ -347,6 +356,7 @@ enum Kind { RowCount, #[cfg(feature = "runtime")] Connect, + Timeout, } struct ErrorInner { @@ -375,6 +385,9 @@ impl fmt::Display for Error { Kind::ToSql(idx) => write!(fmt, "error serializing parameter {}", idx)?, Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, Kind::Column(column) => write!(fmt, "invalid column `{}`", column)?, + Kind::Parameters(real, expected) => { + write!(fmt, "expected {expected} parameters but got {real}")? + } Kind::Closed => fmt.write_str("connection closed")?, Kind::Db => fmt.write_str("db error")?, Kind::Parse => fmt.write_str("error parsing response from server")?, @@ -385,6 +398,7 @@ impl fmt::Display for Error { Kind::RowCount => fmt.write_str("query returned an unexpected number of rows")?, #[cfg(feature = "runtime")] Kind::Connect => fmt.write_str("error connecting to server")?, + Kind::Timeout => fmt.write_str("timeout waiting for server")?, }; if let Some(ref cause) = self.0.cause { write!(fmt, ": {}", cause)?; @@ -405,14 +419,23 @@ impl Error { self.0.cause } + /// Returns the source of this error if it was a `DbError`. 
+ /// + /// This is a simple convenience method. + pub fn as_db_error(&self) -> Option<&DbError> { + self.source().and_then(|e| e.downcast_ref::()) + } + + /// Determines if the error was associated with closed connection. + pub fn is_closed(&self) -> bool { + self.0.kind == Kind::Closed + } + /// Returns the SQLSTATE error code associated with the error. /// - /// This is a convenience method that downcasts the cause to a `DbError` - /// and returns its code. + /// This is a convenience method that downcasts the cause to a `DbError` and returns its code. pub fn code(&self) -> Option<&SqlState> { - self.source() - .and_then(|e| e.downcast_ref::()) - .map(DbError::code) + self.as_db_error().map(DbError::code) } fn new(kind: Kind, cause: Option>) -> Error { @@ -456,6 +479,10 @@ impl Error { Error::new(Kind::Column(column), None) } + pub(crate) fn parameters(real: usize, expected: usize) -> Error { + Error::new(Kind::Parameters(real, expected), None) + } + pub(crate) fn tls(e: Box) -> Error { Error::new(Kind::Tls, Some(e)) } @@ -484,4 +511,9 @@ impl Error { pub(crate) fn connect(e: io::Error) -> Error { Error::new(Kind::Connect, Some(Box::new(e))) } + + #[doc(hidden)] + pub fn __private_api_timeout() -> Error { + Error::new(Kind::Timeout, None) + } } diff --git a/tokio-postgres/src/error/sqlstate.rs b/tokio-postgres/src/error/sqlstate.rs index 013a26472..13a1d75f9 100644 --- a/tokio-postgres/src/error/sqlstate.rs +++ b/tokio-postgres/src/error/sqlstate.rs @@ -1,1143 +1,1670 @@ // Autogenerated file - DO NOT EDIT -use std::borrow::Cow; /// A SQLSTATE error code #[derive(PartialEq, Eq, Clone, Debug)] -pub struct SqlState(Cow<'static, str>); +pub struct SqlState(Inner); impl SqlState { /// Creates a `SqlState` from its error code. 
pub fn from_code(s: &str) -> SqlState { match SQLSTATE_MAP.get(s) { Some(state) => state.clone(), - None => SqlState(Cow::Owned(s.to_string())), + None => SqlState(Inner::Other(s.into())), } } /// Returns the error code corresponding to the `SqlState`. pub fn code(&self) -> &str { - &self.0 + match &self.0 { + Inner::E00000 => "00000", + Inner::E01000 => "01000", + Inner::E0100C => "0100C", + Inner::E01008 => "01008", + Inner::E01003 => "01003", + Inner::E01007 => "01007", + Inner::E01006 => "01006", + Inner::E01004 => "01004", + Inner::E01P01 => "01P01", + Inner::E02000 => "02000", + Inner::E02001 => "02001", + Inner::E03000 => "03000", + Inner::E08000 => "08000", + Inner::E08003 => "08003", + Inner::E08006 => "08006", + Inner::E08001 => "08001", + Inner::E08004 => "08004", + Inner::E08007 => "08007", + Inner::E08P01 => "08P01", + Inner::E09000 => "09000", + Inner::E0A000 => "0A000", + Inner::E0B000 => "0B000", + Inner::E0F000 => "0F000", + Inner::E0F001 => "0F001", + Inner::E0L000 => "0L000", + Inner::E0LP01 => "0LP01", + Inner::E0P000 => "0P000", + Inner::E0Z000 => "0Z000", + Inner::E0Z002 => "0Z002", + Inner::E20000 => "20000", + Inner::E21000 => "21000", + Inner::E22000 => "22000", + Inner::E2202E => "2202E", + Inner::E22021 => "22021", + Inner::E22008 => "22008", + Inner::E22012 => "22012", + Inner::E22005 => "22005", + Inner::E2200B => "2200B", + Inner::E22022 => "22022", + Inner::E22015 => "22015", + Inner::E2201E => "2201E", + Inner::E22014 => "22014", + Inner::E22016 => "22016", + Inner::E2201F => "2201F", + Inner::E2201G => "2201G", + Inner::E22018 => "22018", + Inner::E22007 => "22007", + Inner::E22019 => "22019", + Inner::E2200D => "2200D", + Inner::E22025 => "22025", + Inner::E22P06 => "22P06", + Inner::E22010 => "22010", + Inner::E22023 => "22023", + Inner::E22013 => "22013", + Inner::E2201B => "2201B", + Inner::E2201W => "2201W", + Inner::E2201X => "2201X", + Inner::E2202H => "2202H", + Inner::E2202G => "2202G", + Inner::E22009 => "22009", + 
Inner::E2200C => "2200C", + Inner::E2200G => "2200G", + Inner::E22004 => "22004", + Inner::E22002 => "22002", + Inner::E22003 => "22003", + Inner::E2200H => "2200H", + Inner::E22026 => "22026", + Inner::E22001 => "22001", + Inner::E22011 => "22011", + Inner::E22027 => "22027", + Inner::E22024 => "22024", + Inner::E2200F => "2200F", + Inner::E22P01 => "22P01", + Inner::E22P02 => "22P02", + Inner::E22P03 => "22P03", + Inner::E22P04 => "22P04", + Inner::E22P05 => "22P05", + Inner::E2200L => "2200L", + Inner::E2200M => "2200M", + Inner::E2200N => "2200N", + Inner::E2200S => "2200S", + Inner::E2200T => "2200T", + Inner::E22030 => "22030", + Inner::E22031 => "22031", + Inner::E22032 => "22032", + Inner::E22033 => "22033", + Inner::E22034 => "22034", + Inner::E22035 => "22035", + Inner::E22036 => "22036", + Inner::E22037 => "22037", + Inner::E22038 => "22038", + Inner::E22039 => "22039", + Inner::E2203A => "2203A", + Inner::E2203B => "2203B", + Inner::E2203C => "2203C", + Inner::E2203D => "2203D", + Inner::E2203E => "2203E", + Inner::E2203F => "2203F", + Inner::E2203G => "2203G", + Inner::E23000 => "23000", + Inner::E23001 => "23001", + Inner::E23502 => "23502", + Inner::E23503 => "23503", + Inner::E23505 => "23505", + Inner::E23514 => "23514", + Inner::E23P01 => "23P01", + Inner::E24000 => "24000", + Inner::E25000 => "25000", + Inner::E25001 => "25001", + Inner::E25002 => "25002", + Inner::E25008 => "25008", + Inner::E25003 => "25003", + Inner::E25004 => "25004", + Inner::E25005 => "25005", + Inner::E25006 => "25006", + Inner::E25007 => "25007", + Inner::E25P01 => "25P01", + Inner::E25P02 => "25P02", + Inner::E25P03 => "25P03", + Inner::E26000 => "26000", + Inner::E27000 => "27000", + Inner::E28000 => "28000", + Inner::E28P01 => "28P01", + Inner::E2B000 => "2B000", + Inner::E2BP01 => "2BP01", + Inner::E2D000 => "2D000", + Inner::E2F000 => "2F000", + Inner::E2F005 => "2F005", + Inner::E2F002 => "2F002", + Inner::E2F003 => "2F003", + Inner::E2F004 => "2F004", + 
Inner::E34000 => "34000", + Inner::E38000 => "38000", + Inner::E38001 => "38001", + Inner::E38002 => "38002", + Inner::E38003 => "38003", + Inner::E38004 => "38004", + Inner::E39000 => "39000", + Inner::E39001 => "39001", + Inner::E39004 => "39004", + Inner::E39P01 => "39P01", + Inner::E39P02 => "39P02", + Inner::E39P03 => "39P03", + Inner::E3B000 => "3B000", + Inner::E3B001 => "3B001", + Inner::E3D000 => "3D000", + Inner::E3F000 => "3F000", + Inner::E40000 => "40000", + Inner::E40002 => "40002", + Inner::E40001 => "40001", + Inner::E40003 => "40003", + Inner::E40P01 => "40P01", + Inner::E42000 => "42000", + Inner::E42601 => "42601", + Inner::E42501 => "42501", + Inner::E42846 => "42846", + Inner::E42803 => "42803", + Inner::E42P20 => "42P20", + Inner::E42P19 => "42P19", + Inner::E42830 => "42830", + Inner::E42602 => "42602", + Inner::E42622 => "42622", + Inner::E42939 => "42939", + Inner::E42804 => "42804", + Inner::E42P18 => "42P18", + Inner::E42P21 => "42P21", + Inner::E42P22 => "42P22", + Inner::E42809 => "42809", + Inner::E428C9 => "428C9", + Inner::E42703 => "42703", + Inner::E42883 => "42883", + Inner::E42P01 => "42P01", + Inner::E42P02 => "42P02", + Inner::E42704 => "42704", + Inner::E42701 => "42701", + Inner::E42P03 => "42P03", + Inner::E42P04 => "42P04", + Inner::E42723 => "42723", + Inner::E42P05 => "42P05", + Inner::E42P06 => "42P06", + Inner::E42P07 => "42P07", + Inner::E42712 => "42712", + Inner::E42710 => "42710", + Inner::E42702 => "42702", + Inner::E42725 => "42725", + Inner::E42P08 => "42P08", + Inner::E42P09 => "42P09", + Inner::E42P10 => "42P10", + Inner::E42611 => "42611", + Inner::E42P11 => "42P11", + Inner::E42P12 => "42P12", + Inner::E42P13 => "42P13", + Inner::E42P14 => "42P14", + Inner::E42P15 => "42P15", + Inner::E42P16 => "42P16", + Inner::E42P17 => "42P17", + Inner::E44000 => "44000", + Inner::E53000 => "53000", + Inner::E53100 => "53100", + Inner::E53200 => "53200", + Inner::E53300 => "53300", + Inner::E53400 => "53400", + 
Inner::E54000 => "54000", + Inner::E54001 => "54001", + Inner::E54011 => "54011", + Inner::E54023 => "54023", + Inner::E55000 => "55000", + Inner::E55006 => "55006", + Inner::E55P02 => "55P02", + Inner::E55P03 => "55P03", + Inner::E55P04 => "55P04", + Inner::E57000 => "57000", + Inner::E57014 => "57014", + Inner::E57P01 => "57P01", + Inner::E57P02 => "57P02", + Inner::E57P03 => "57P03", + Inner::E57P04 => "57P04", + Inner::E57P05 => "57P05", + Inner::E58000 => "58000", + Inner::E58030 => "58030", + Inner::E58P01 => "58P01", + Inner::E58P02 => "58P02", + Inner::E72000 => "72000", + Inner::EF0000 => "F0000", + Inner::EF0001 => "F0001", + Inner::EHV000 => "HV000", + Inner::EHV005 => "HV005", + Inner::EHV002 => "HV002", + Inner::EHV010 => "HV010", + Inner::EHV021 => "HV021", + Inner::EHV024 => "HV024", + Inner::EHV007 => "HV007", + Inner::EHV008 => "HV008", + Inner::EHV004 => "HV004", + Inner::EHV006 => "HV006", + Inner::EHV091 => "HV091", + Inner::EHV00B => "HV00B", + Inner::EHV00C => "HV00C", + Inner::EHV00D => "HV00D", + Inner::EHV090 => "HV090", + Inner::EHV00A => "HV00A", + Inner::EHV009 => "HV009", + Inner::EHV014 => "HV014", + Inner::EHV001 => "HV001", + Inner::EHV00P => "HV00P", + Inner::EHV00J => "HV00J", + Inner::EHV00K => "HV00K", + Inner::EHV00Q => "HV00Q", + Inner::EHV00R => "HV00R", + Inner::EHV00L => "HV00L", + Inner::EHV00M => "HV00M", + Inner::EHV00N => "HV00N", + Inner::EP0000 => "P0000", + Inner::EP0001 => "P0001", + Inner::EP0002 => "P0002", + Inner::EP0003 => "P0003", + Inner::EP0004 => "P0004", + Inner::EXX000 => "XX000", + Inner::EXX001 => "XX001", + Inner::EXX002 => "XX002", + Inner::Other(code) => code, + } } /// 00000 - pub const SUCCESSFUL_COMPLETION: SqlState = SqlState(Cow::Borrowed("00000")); + pub const SUCCESSFUL_COMPLETION: SqlState = SqlState(Inner::E00000); /// 01000 - pub const WARNING: SqlState = SqlState(Cow::Borrowed("01000")); + pub const WARNING: SqlState = SqlState(Inner::E01000); /// 0100C - pub const 
WARNING_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Cow::Borrowed("0100C")); + pub const WARNING_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Inner::E0100C); /// 01008 - pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Cow::Borrowed("01008")); + pub const WARNING_IMPLICIT_ZERO_BIT_PADDING: SqlState = SqlState(Inner::E01008); /// 01003 - pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = - SqlState(Cow::Borrowed("01003")); + pub const WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION: SqlState = SqlState(Inner::E01003); /// 01007 - pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Cow::Borrowed("01007")); + pub const WARNING_PRIVILEGE_NOT_GRANTED: SqlState = SqlState(Inner::E01007); /// 01006 - pub const WARNING_PRIVILEGE_NOT_REVOKED: SqlState = SqlState(Cow::Borrowed("01006")); + pub const WARNING_PRIVILEGE_NOT_REVOKED: SqlState = SqlState(Inner::E01006); /// 01004 - pub const WARNING_STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Cow::Borrowed("01004")); + pub const WARNING_STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Inner::E01004); /// 01P01 - pub const WARNING_DEPRECATED_FEATURE: SqlState = SqlState(Cow::Borrowed("01P01")); + pub const WARNING_DEPRECATED_FEATURE: SqlState = SqlState(Inner::E01P01); /// 02000 - pub const NO_DATA: SqlState = SqlState(Cow::Borrowed("02000")); + pub const NO_DATA: SqlState = SqlState(Inner::E02000); /// 02001 - pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = - SqlState(Cow::Borrowed("02001")); + pub const NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED: SqlState = SqlState(Inner::E02001); /// 03000 - pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Cow::Borrowed("03000")); + pub const SQL_STATEMENT_NOT_YET_COMPLETE: SqlState = SqlState(Inner::E03000); /// 08000 - pub const CONNECTION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("08000")); + pub const CONNECTION_EXCEPTION: SqlState = SqlState(Inner::E08000); /// 08003 - pub const 
CONNECTION_DOES_NOT_EXIST: SqlState = SqlState(Cow::Borrowed("08003")); + pub const CONNECTION_DOES_NOT_EXIST: SqlState = SqlState(Inner::E08003); /// 08006 - pub const CONNECTION_FAILURE: SqlState = SqlState(Cow::Borrowed("08006")); + pub const CONNECTION_FAILURE: SqlState = SqlState(Inner::E08006); /// 08001 - pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = - SqlState(Cow::Borrowed("08001")); + pub const SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION: SqlState = SqlState(Inner::E08001); /// 08004 - pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = - SqlState(Cow::Borrowed("08004")); + pub const SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION: SqlState = SqlState(Inner::E08004); /// 08007 - pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("08007")); + pub const TRANSACTION_RESOLUTION_UNKNOWN: SqlState = SqlState(Inner::E08007); /// 08P01 - pub const PROTOCOL_VIOLATION: SqlState = SqlState(Cow::Borrowed("08P01")); + pub const PROTOCOL_VIOLATION: SqlState = SqlState(Inner::E08P01); /// 09000 - pub const TRIGGERED_ACTION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("09000")); + pub const TRIGGERED_ACTION_EXCEPTION: SqlState = SqlState(Inner::E09000); /// 0A000 - pub const FEATURE_NOT_SUPPORTED: SqlState = SqlState(Cow::Borrowed("0A000")); + pub const FEATURE_NOT_SUPPORTED: SqlState = SqlState(Inner::E0A000); /// 0B000 - pub const INVALID_TRANSACTION_INITIATION: SqlState = SqlState(Cow::Borrowed("0B000")); + pub const INVALID_TRANSACTION_INITIATION: SqlState = SqlState(Inner::E0B000); /// 0F000 - pub const LOCATOR_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0F000")); + pub const LOCATOR_EXCEPTION: SqlState = SqlState(Inner::E0F000); /// 0F001 - pub const L_E_INVALID_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("0F001")); + pub const L_E_INVALID_SPECIFICATION: SqlState = SqlState(Inner::E0F001); /// 0L000 - pub const INVALID_GRANTOR: SqlState = SqlState(Cow::Borrowed("0L000")); + pub const 
INVALID_GRANTOR: SqlState = SqlState(Inner::E0L000); /// 0LP01 - pub const INVALID_GRANT_OPERATION: SqlState = SqlState(Cow::Borrowed("0LP01")); + pub const INVALID_GRANT_OPERATION: SqlState = SqlState(Inner::E0LP01); /// 0P000 - pub const INVALID_ROLE_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("0P000")); + pub const INVALID_ROLE_SPECIFICATION: SqlState = SqlState(Inner::E0P000); /// 0Z000 - pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Cow::Borrowed("0Z000")); + pub const DIAGNOSTICS_EXCEPTION: SqlState = SqlState(Inner::E0Z000); /// 0Z002 pub const STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER: SqlState = - SqlState(Cow::Borrowed("0Z002")); + SqlState(Inner::E0Z002); /// 20000 - pub const CASE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("20000")); + pub const CASE_NOT_FOUND: SqlState = SqlState(Inner::E20000); /// 21000 - pub const CARDINALITY_VIOLATION: SqlState = SqlState(Cow::Borrowed("21000")); + pub const CARDINALITY_VIOLATION: SqlState = SqlState(Inner::E21000); /// 22000 - pub const DATA_EXCEPTION: SqlState = SqlState(Cow::Borrowed("22000")); + pub const DATA_EXCEPTION: SqlState = SqlState(Inner::E22000); /// 2202E - pub const ARRAY_ELEMENT_ERROR: SqlState = SqlState(Cow::Borrowed("2202E")); + pub const ARRAY_ELEMENT_ERROR: SqlState = SqlState(Inner::E2202E); /// 2202E - pub const ARRAY_SUBSCRIPT_ERROR: SqlState = SqlState(Cow::Borrowed("2202E")); + pub const ARRAY_SUBSCRIPT_ERROR: SqlState = SqlState(Inner::E2202E); /// 22021 - pub const CHARACTER_NOT_IN_REPERTOIRE: SqlState = SqlState(Cow::Borrowed("22021")); + pub const CHARACTER_NOT_IN_REPERTOIRE: SqlState = SqlState(Inner::E22021); /// 22008 - pub const DATETIME_FIELD_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22008")); + pub const DATETIME_FIELD_OVERFLOW: SqlState = SqlState(Inner::E22008); /// 22008 - pub const DATETIME_VALUE_OUT_OF_RANGE: SqlState = SqlState(Cow::Borrowed("22008")); + pub const DATETIME_VALUE_OUT_OF_RANGE: SqlState = SqlState(Inner::E22008); /// 22012 - pub 
const DIVISION_BY_ZERO: SqlState = SqlState(Cow::Borrowed("22012")); + pub const DIVISION_BY_ZERO: SqlState = SqlState(Inner::E22012); /// 22005 - pub const ERROR_IN_ASSIGNMENT: SqlState = SqlState(Cow::Borrowed("22005")); + pub const ERROR_IN_ASSIGNMENT: SqlState = SqlState(Inner::E22005); /// 2200B - pub const ESCAPE_CHARACTER_CONFLICT: SqlState = SqlState(Cow::Borrowed("2200B")); + pub const ESCAPE_CHARACTER_CONFLICT: SqlState = SqlState(Inner::E2200B); /// 22022 - pub const INDICATOR_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22022")); + pub const INDICATOR_OVERFLOW: SqlState = SqlState(Inner::E22022); /// 22015 - pub const INTERVAL_FIELD_OVERFLOW: SqlState = SqlState(Cow::Borrowed("22015")); + pub const INTERVAL_FIELD_OVERFLOW: SqlState = SqlState(Inner::E22015); /// 2201E - pub const INVALID_ARGUMENT_FOR_LOG: SqlState = SqlState(Cow::Borrowed("2201E")); + pub const INVALID_ARGUMENT_FOR_LOG: SqlState = SqlState(Inner::E2201E); /// 22014 - pub const INVALID_ARGUMENT_FOR_NTILE: SqlState = SqlState(Cow::Borrowed("22014")); + pub const INVALID_ARGUMENT_FOR_NTILE: SqlState = SqlState(Inner::E22014); /// 22016 - pub const INVALID_ARGUMENT_FOR_NTH_VALUE: SqlState = SqlState(Cow::Borrowed("22016")); + pub const INVALID_ARGUMENT_FOR_NTH_VALUE: SqlState = SqlState(Inner::E22016); /// 2201F - pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Cow::Borrowed("2201F")); + pub const INVALID_ARGUMENT_FOR_POWER_FUNCTION: SqlState = SqlState(Inner::E2201F); /// 2201G - pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = - SqlState(Cow::Borrowed("2201G")); + pub const INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION: SqlState = SqlState(Inner::E2201G); /// 22018 - pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Cow::Borrowed("22018")); + pub const INVALID_CHARACTER_VALUE_FOR_CAST: SqlState = SqlState(Inner::E22018); /// 22007 - pub const INVALID_DATETIME_FORMAT: SqlState = SqlState(Cow::Borrowed("22007")); + pub const 
INVALID_DATETIME_FORMAT: SqlState = SqlState(Inner::E22007); /// 22019 - pub const INVALID_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22019")); + pub const INVALID_ESCAPE_CHARACTER: SqlState = SqlState(Inner::E22019); /// 2200D - pub const INVALID_ESCAPE_OCTET: SqlState = SqlState(Cow::Borrowed("2200D")); + pub const INVALID_ESCAPE_OCTET: SqlState = SqlState(Inner::E2200D); /// 22025 - pub const INVALID_ESCAPE_SEQUENCE: SqlState = SqlState(Cow::Borrowed("22025")); + pub const INVALID_ESCAPE_SEQUENCE: SqlState = SqlState(Inner::E22025); /// 22P06 - pub const NONSTANDARD_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22P06")); + pub const NONSTANDARD_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Inner::E22P06); /// 22010 - pub const INVALID_INDICATOR_PARAMETER_VALUE: SqlState = SqlState(Cow::Borrowed("22010")); + pub const INVALID_INDICATOR_PARAMETER_VALUE: SqlState = SqlState(Inner::E22010); /// 22023 - pub const INVALID_PARAMETER_VALUE: SqlState = SqlState(Cow::Borrowed("22023")); + pub const INVALID_PARAMETER_VALUE: SqlState = SqlState(Inner::E22023); /// 22013 - pub const INVALID_PRECEDING_OR_FOLLOWING_SIZE: SqlState = SqlState(Cow::Borrowed("22013")); + pub const INVALID_PRECEDING_OR_FOLLOWING_SIZE: SqlState = SqlState(Inner::E22013); /// 2201B - pub const INVALID_REGULAR_EXPRESSION: SqlState = SqlState(Cow::Borrowed("2201B")); + pub const INVALID_REGULAR_EXPRESSION: SqlState = SqlState(Inner::E2201B); /// 2201W - pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Cow::Borrowed("2201W")); + pub const INVALID_ROW_COUNT_IN_LIMIT_CLAUSE: SqlState = SqlState(Inner::E2201W); /// 2201X - pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = - SqlState(Cow::Borrowed("2201X")); + pub const INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE: SqlState = SqlState(Inner::E2201X); /// 2202H - pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = SqlState(Cow::Borrowed("2202H")); + pub const INVALID_TABLESAMPLE_ARGUMENT: SqlState = 
SqlState(Inner::E2202H); /// 2202G - pub const INVALID_TABLESAMPLE_REPEAT: SqlState = SqlState(Cow::Borrowed("2202G")); + pub const INVALID_TABLESAMPLE_REPEAT: SqlState = SqlState(Inner::E2202G); /// 22009 - pub const INVALID_TIME_ZONE_DISPLACEMENT_VALUE: SqlState = SqlState(Cow::Borrowed("22009")); + pub const INVALID_TIME_ZONE_DISPLACEMENT_VALUE: SqlState = SqlState(Inner::E22009); /// 2200C - pub const INVALID_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Cow::Borrowed("2200C")); + pub const INVALID_USE_OF_ESCAPE_CHARACTER: SqlState = SqlState(Inner::E2200C); /// 2200G - pub const MOST_SPECIFIC_TYPE_MISMATCH: SqlState = SqlState(Cow::Borrowed("2200G")); + pub const MOST_SPECIFIC_TYPE_MISMATCH: SqlState = SqlState(Inner::E2200G); /// 22004 - pub const NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Cow::Borrowed("22004")); + pub const NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Inner::E22004); /// 22002 - pub const NULL_VALUE_NO_INDICATOR_PARAMETER: SqlState = SqlState(Cow::Borrowed("22002")); + pub const NULL_VALUE_NO_INDICATOR_PARAMETER: SqlState = SqlState(Inner::E22002); /// 22003 - pub const NUMERIC_VALUE_OUT_OF_RANGE: SqlState = SqlState(Cow::Borrowed("22003")); + pub const NUMERIC_VALUE_OUT_OF_RANGE: SqlState = SqlState(Inner::E22003); /// 2200H - pub const SEQUENCE_GENERATOR_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("2200H")); + pub const SEQUENCE_GENERATOR_LIMIT_EXCEEDED: SqlState = SqlState(Inner::E2200H); /// 22026 - pub const STRING_DATA_LENGTH_MISMATCH: SqlState = SqlState(Cow::Borrowed("22026")); + pub const STRING_DATA_LENGTH_MISMATCH: SqlState = SqlState(Inner::E22026); /// 22001 - pub const STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Cow::Borrowed("22001")); + pub const STRING_DATA_RIGHT_TRUNCATION: SqlState = SqlState(Inner::E22001); /// 22011 - pub const SUBSTRING_ERROR: SqlState = SqlState(Cow::Borrowed("22011")); + pub const SUBSTRING_ERROR: SqlState = SqlState(Inner::E22011); /// 22027 - pub const TRIM_ERROR: SqlState = 
SqlState(Cow::Borrowed("22027")); + pub const TRIM_ERROR: SqlState = SqlState(Inner::E22027); /// 22024 - pub const UNTERMINATED_C_STRING: SqlState = SqlState(Cow::Borrowed("22024")); + pub const UNTERMINATED_C_STRING: SqlState = SqlState(Inner::E22024); /// 2200F - pub const ZERO_LENGTH_CHARACTER_STRING: SqlState = SqlState(Cow::Borrowed("2200F")); + pub const ZERO_LENGTH_CHARACTER_STRING: SqlState = SqlState(Inner::E2200F); /// 22P01 - pub const FLOATING_POINT_EXCEPTION: SqlState = SqlState(Cow::Borrowed("22P01")); + pub const FLOATING_POINT_EXCEPTION: SqlState = SqlState(Inner::E22P01); /// 22P02 - pub const INVALID_TEXT_REPRESENTATION: SqlState = SqlState(Cow::Borrowed("22P02")); + pub const INVALID_TEXT_REPRESENTATION: SqlState = SqlState(Inner::E22P02); /// 22P03 - pub const INVALID_BINARY_REPRESENTATION: SqlState = SqlState(Cow::Borrowed("22P03")); + pub const INVALID_BINARY_REPRESENTATION: SqlState = SqlState(Inner::E22P03); /// 22P04 - pub const BAD_COPY_FILE_FORMAT: SqlState = SqlState(Cow::Borrowed("22P04")); + pub const BAD_COPY_FILE_FORMAT: SqlState = SqlState(Inner::E22P04); /// 22P05 - pub const UNTRANSLATABLE_CHARACTER: SqlState = SqlState(Cow::Borrowed("22P05")); + pub const UNTRANSLATABLE_CHARACTER: SqlState = SqlState(Inner::E22P05); /// 2200L - pub const NOT_AN_XML_DOCUMENT: SqlState = SqlState(Cow::Borrowed("2200L")); + pub const NOT_AN_XML_DOCUMENT: SqlState = SqlState(Inner::E2200L); /// 2200M - pub const INVALID_XML_DOCUMENT: SqlState = SqlState(Cow::Borrowed("2200M")); + pub const INVALID_XML_DOCUMENT: SqlState = SqlState(Inner::E2200M); /// 2200N - pub const INVALID_XML_CONTENT: SqlState = SqlState(Cow::Borrowed("2200N")); + pub const INVALID_XML_CONTENT: SqlState = SqlState(Inner::E2200N); /// 2200S - pub const INVALID_XML_COMMENT: SqlState = SqlState(Cow::Borrowed("2200S")); + pub const INVALID_XML_COMMENT: SqlState = SqlState(Inner::E2200S); /// 2200T - pub const INVALID_XML_PROCESSING_INSTRUCTION: SqlState = 
SqlState(Cow::Borrowed("2200T")); + pub const INVALID_XML_PROCESSING_INSTRUCTION: SqlState = SqlState(Inner::E2200T); /// 22030 - pub const DUPLICATE_JSON_OBJECT_KEY_VALUE: SqlState = SqlState(Cow::Borrowed("22030")); + pub const DUPLICATE_JSON_OBJECT_KEY_VALUE: SqlState = SqlState(Inner::E22030); + + /// 22031 + pub const INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION: SqlState = SqlState(Inner::E22031); /// 22032 - pub const INVALID_JSON_TEXT: SqlState = SqlState(Cow::Borrowed("22032")); + pub const INVALID_JSON_TEXT: SqlState = SqlState(Inner::E22032); /// 22033 - pub const INVALID_SQL_JSON_SUBSCRIPT: SqlState = SqlState(Cow::Borrowed("22033")); + pub const INVALID_SQL_JSON_SUBSCRIPT: SqlState = SqlState(Inner::E22033); /// 22034 - pub const MORE_THAN_ONE_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22034")); + pub const MORE_THAN_ONE_SQL_JSON_ITEM: SqlState = SqlState(Inner::E22034); /// 22035 - pub const NO_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22035")); + pub const NO_SQL_JSON_ITEM: SqlState = SqlState(Inner::E22035); /// 22036 - pub const NON_NUMERIC_SQL_JSON_ITEM: SqlState = SqlState(Cow::Borrowed("22036")); + pub const NON_NUMERIC_SQL_JSON_ITEM: SqlState = SqlState(Inner::E22036); /// 22037 - pub const NON_UNIQUE_KEYS_IN_A_JSON_OBJECT: SqlState = SqlState(Cow::Borrowed("22037")); + pub const NON_UNIQUE_KEYS_IN_A_JSON_OBJECT: SqlState = SqlState(Inner::E22037); /// 22038 - pub const SINGLETON_SQL_JSON_ITEM_REQUIRED: SqlState = SqlState(Cow::Borrowed("22038")); + pub const SINGLETON_SQL_JSON_ITEM_REQUIRED: SqlState = SqlState(Inner::E22038); /// 22039 - pub const SQL_JSON_ARRAY_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("22039")); + pub const SQL_JSON_ARRAY_NOT_FOUND: SqlState = SqlState(Inner::E22039); /// 2203A - pub const SQL_JSON_MEMBER_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("2203A")); + pub const SQL_JSON_MEMBER_NOT_FOUND: SqlState = SqlState(Inner::E2203A); /// 2203B - pub const SQL_JSON_NUMBER_NOT_FOUND: SqlState = 
SqlState(Cow::Borrowed("2203B")); + pub const SQL_JSON_NUMBER_NOT_FOUND: SqlState = SqlState(Inner::E2203B); /// 2203C - pub const SQL_JSON_OBJECT_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("2203C")); + pub const SQL_JSON_OBJECT_NOT_FOUND: SqlState = SqlState(Inner::E2203C); /// 2203D - pub const TOO_MANY_JSON_ARRAY_ELEMENTS: SqlState = SqlState(Cow::Borrowed("2203D")); + pub const TOO_MANY_JSON_ARRAY_ELEMENTS: SqlState = SqlState(Inner::E2203D); /// 2203E - pub const TOO_MANY_JSON_OBJECT_MEMBERS: SqlState = SqlState(Cow::Borrowed("2203E")); + pub const TOO_MANY_JSON_OBJECT_MEMBERS: SqlState = SqlState(Inner::E2203E); /// 2203F - pub const SQL_JSON_SCALAR_REQUIRED: SqlState = SqlState(Cow::Borrowed("2203F")); + pub const SQL_JSON_SCALAR_REQUIRED: SqlState = SqlState(Inner::E2203F); + + /// 2203G + pub const SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE: SqlState = SqlState(Inner::E2203G); /// 23000 - pub const INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Cow::Borrowed("23000")); + pub const INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Inner::E23000); /// 23001 - pub const RESTRICT_VIOLATION: SqlState = SqlState(Cow::Borrowed("23001")); + pub const RESTRICT_VIOLATION: SqlState = SqlState(Inner::E23001); /// 23502 - pub const NOT_NULL_VIOLATION: SqlState = SqlState(Cow::Borrowed("23502")); + pub const NOT_NULL_VIOLATION: SqlState = SqlState(Inner::E23502); /// 23503 - pub const FOREIGN_KEY_VIOLATION: SqlState = SqlState(Cow::Borrowed("23503")); + pub const FOREIGN_KEY_VIOLATION: SqlState = SqlState(Inner::E23503); /// 23505 - pub const UNIQUE_VIOLATION: SqlState = SqlState(Cow::Borrowed("23505")); + pub const UNIQUE_VIOLATION: SqlState = SqlState(Inner::E23505); /// 23514 - pub const CHECK_VIOLATION: SqlState = SqlState(Cow::Borrowed("23514")); + pub const CHECK_VIOLATION: SqlState = SqlState(Inner::E23514); /// 23P01 - pub const EXCLUSION_VIOLATION: SqlState = SqlState(Cow::Borrowed("23P01")); + pub const EXCLUSION_VIOLATION: SqlState = 
SqlState(Inner::E23P01); /// 24000 - pub const INVALID_CURSOR_STATE: SqlState = SqlState(Cow::Borrowed("24000")); + pub const INVALID_CURSOR_STATE: SqlState = SqlState(Inner::E24000); /// 25000 - pub const INVALID_TRANSACTION_STATE: SqlState = SqlState(Cow::Borrowed("25000")); + pub const INVALID_TRANSACTION_STATE: SqlState = SqlState(Inner::E25000); /// 25001 - pub const ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25001")); + pub const ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Inner::E25001); /// 25002 - pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Cow::Borrowed("25002")); + pub const BRANCH_TRANSACTION_ALREADY_ACTIVE: SqlState = SqlState(Inner::E25002); /// 25008 - pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = - SqlState(Cow::Borrowed("25008")); + pub const HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL: SqlState = SqlState(Inner::E25008); /// 25003 - pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25003")); + pub const INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25003); /// 25004 pub const INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25004")); + SqlState(Inner::E25004); /// 25005 - pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = - SqlState(Cow::Borrowed("25005")); + pub const NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION: SqlState = SqlState(Inner::E25005); /// 25006 - pub const READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25006")); + pub const READ_ONLY_SQL_TRANSACTION: SqlState = SqlState(Inner::E25006); /// 25007 - pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = - SqlState(Cow::Borrowed("25007")); + pub const SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED: SqlState = SqlState(Inner::E25007); /// 25P01 - pub const NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P01")); + pub const 
NO_ACTIVE_SQL_TRANSACTION: SqlState = SqlState(Inner::E25P01); /// 25P02 - pub const IN_FAILED_SQL_TRANSACTION: SqlState = SqlState(Cow::Borrowed("25P02")); + pub const IN_FAILED_SQL_TRANSACTION: SqlState = SqlState(Inner::E25P02); /// 25P03 - pub const IDLE_IN_TRANSACTION_SESSION_TIMEOUT: SqlState = SqlState(Cow::Borrowed("25P03")); + pub const IDLE_IN_TRANSACTION_SESSION_TIMEOUT: SqlState = SqlState(Inner::E25P03); /// 26000 - pub const INVALID_SQL_STATEMENT_NAME: SqlState = SqlState(Cow::Borrowed("26000")); + pub const INVALID_SQL_STATEMENT_NAME: SqlState = SqlState(Inner::E26000); /// 26000 - pub const UNDEFINED_PSTATEMENT: SqlState = SqlState(Cow::Borrowed("26000")); + pub const UNDEFINED_PSTATEMENT: SqlState = SqlState(Inner::E26000); /// 27000 - pub const TRIGGERED_DATA_CHANGE_VIOLATION: SqlState = SqlState(Cow::Borrowed("27000")); + pub const TRIGGERED_DATA_CHANGE_VIOLATION: SqlState = SqlState(Inner::E27000); /// 28000 - pub const INVALID_AUTHORIZATION_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("28000")); + pub const INVALID_AUTHORIZATION_SPECIFICATION: SqlState = SqlState(Inner::E28000); /// 28P01 - pub const INVALID_PASSWORD: SqlState = SqlState(Cow::Borrowed("28P01")); + pub const INVALID_PASSWORD: SqlState = SqlState(Inner::E28P01); /// 2B000 - pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = - SqlState(Cow::Borrowed("2B000")); + pub const DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST: SqlState = SqlState(Inner::E2B000); /// 2BP01 - pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = SqlState(Cow::Borrowed("2BP01")); + pub const DEPENDENT_OBJECTS_STILL_EXIST: SqlState = SqlState(Inner::E2BP01); /// 2D000 - pub const INVALID_TRANSACTION_TERMINATION: SqlState = SqlState(Cow::Borrowed("2D000")); + pub const INVALID_TRANSACTION_TERMINATION: SqlState = SqlState(Inner::E2D000); /// 2F000 - pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("2F000")); + pub const SQL_ROUTINE_EXCEPTION: SqlState = SqlState(Inner::E2F000); 
/// 2F005 - pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = - SqlState(Cow::Borrowed("2F005")); + pub const S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT: SqlState = SqlState(Inner::E2F005); /// 2F002 - pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F002")); + pub const S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Inner::E2F002); /// 2F003 - pub const S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Cow::Borrowed("2F003")); + pub const S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Inner::E2F003); /// 2F004 - pub const S_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("2F004")); + pub const S_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Inner::E2F004); /// 34000 - pub const INVALID_CURSOR_NAME: SqlState = SqlState(Cow::Borrowed("34000")); + pub const INVALID_CURSOR_NAME: SqlState = SqlState(Inner::E34000); /// 34000 - pub const UNDEFINED_CURSOR: SqlState = SqlState(Cow::Borrowed("34000")); + pub const UNDEFINED_CURSOR: SqlState = SqlState(Inner::E34000); /// 38000 - pub const EXTERNAL_ROUTINE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("38000")); + pub const EXTERNAL_ROUTINE_EXCEPTION: SqlState = SqlState(Inner::E38000); /// 38001 - pub const E_R_E_CONTAINING_SQL_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38001")); + pub const E_R_E_CONTAINING_SQL_NOT_PERMITTED: SqlState = SqlState(Inner::E38001); /// 38002 - pub const E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38002")); + pub const E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Inner::E38002); /// 38003 - pub const E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Cow::Borrowed("38003")); + pub const E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED: SqlState = SqlState(Inner::E38003); /// 38004 - pub const E_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Cow::Borrowed("38004")); + pub const 
E_R_E_READING_SQL_DATA_NOT_PERMITTED: SqlState = SqlState(Inner::E38004); /// 39000 - pub const EXTERNAL_ROUTINE_INVOCATION_EXCEPTION: SqlState = SqlState(Cow::Borrowed("39000")); + pub const EXTERNAL_ROUTINE_INVOCATION_EXCEPTION: SqlState = SqlState(Inner::E39000); /// 39001 - pub const E_R_I_E_INVALID_SQLSTATE_RETURNED: SqlState = SqlState(Cow::Borrowed("39001")); + pub const E_R_I_E_INVALID_SQLSTATE_RETURNED: SqlState = SqlState(Inner::E39001); /// 39004 - pub const E_R_I_E_NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Cow::Borrowed("39004")); + pub const E_R_I_E_NULL_VALUE_NOT_ALLOWED: SqlState = SqlState(Inner::E39004); /// 39P01 - pub const E_R_I_E_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P01")); + pub const E_R_I_E_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Inner::E39P01); /// 39P02 - pub const E_R_I_E_SRF_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P02")); + pub const E_R_I_E_SRF_PROTOCOL_VIOLATED: SqlState = SqlState(Inner::E39P02); /// 39P03 - pub const E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Cow::Borrowed("39P03")); + pub const E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED: SqlState = SqlState(Inner::E39P03); /// 3B000 - pub const SAVEPOINT_EXCEPTION: SqlState = SqlState(Cow::Borrowed("3B000")); + pub const SAVEPOINT_EXCEPTION: SqlState = SqlState(Inner::E3B000); /// 3B001 - pub const S_E_INVALID_SPECIFICATION: SqlState = SqlState(Cow::Borrowed("3B001")); + pub const S_E_INVALID_SPECIFICATION: SqlState = SqlState(Inner::E3B001); /// 3D000 - pub const INVALID_CATALOG_NAME: SqlState = SqlState(Cow::Borrowed("3D000")); + pub const INVALID_CATALOG_NAME: SqlState = SqlState(Inner::E3D000); /// 3D000 - pub const UNDEFINED_DATABASE: SqlState = SqlState(Cow::Borrowed("3D000")); + pub const UNDEFINED_DATABASE: SqlState = SqlState(Inner::E3D000); /// 3F000 - pub const INVALID_SCHEMA_NAME: SqlState = SqlState(Cow::Borrowed("3F000")); + pub const INVALID_SCHEMA_NAME: SqlState = SqlState(Inner::E3F000); /// 3F000 - 
pub const UNDEFINED_SCHEMA: SqlState = SqlState(Cow::Borrowed("3F000")); + pub const UNDEFINED_SCHEMA: SqlState = SqlState(Inner::E3F000); /// 40000 - pub const TRANSACTION_ROLLBACK: SqlState = SqlState(Cow::Borrowed("40000")); + pub const TRANSACTION_ROLLBACK: SqlState = SqlState(Inner::E40000); /// 40002 - pub const T_R_INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Cow::Borrowed("40002")); + pub const T_R_INTEGRITY_CONSTRAINT_VIOLATION: SqlState = SqlState(Inner::E40002); /// 40001 - pub const T_R_SERIALIZATION_FAILURE: SqlState = SqlState(Cow::Borrowed("40001")); + pub const T_R_SERIALIZATION_FAILURE: SqlState = SqlState(Inner::E40001); /// 40003 - pub const T_R_STATEMENT_COMPLETION_UNKNOWN: SqlState = SqlState(Cow::Borrowed("40003")); + pub const T_R_STATEMENT_COMPLETION_UNKNOWN: SqlState = SqlState(Inner::E40003); /// 40P01 - pub const T_R_DEADLOCK_DETECTED: SqlState = SqlState(Cow::Borrowed("40P01")); + pub const T_R_DEADLOCK_DETECTED: SqlState = SqlState(Inner::E40P01); /// 42000 - pub const SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION: SqlState = SqlState(Cow::Borrowed("42000")); + pub const SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION: SqlState = SqlState(Inner::E42000); /// 42601 - pub const SYNTAX_ERROR: SqlState = SqlState(Cow::Borrowed("42601")); + pub const SYNTAX_ERROR: SqlState = SqlState(Inner::E42601); /// 42501 - pub const INSUFFICIENT_PRIVILEGE: SqlState = SqlState(Cow::Borrowed("42501")); + pub const INSUFFICIENT_PRIVILEGE: SqlState = SqlState(Inner::E42501); /// 42846 - pub const CANNOT_COERCE: SqlState = SqlState(Cow::Borrowed("42846")); + pub const CANNOT_COERCE: SqlState = SqlState(Inner::E42846); /// 42803 - pub const GROUPING_ERROR: SqlState = SqlState(Cow::Borrowed("42803")); + pub const GROUPING_ERROR: SqlState = SqlState(Inner::E42803); /// 42P20 - pub const WINDOWING_ERROR: SqlState = SqlState(Cow::Borrowed("42P20")); + pub const WINDOWING_ERROR: SqlState = SqlState(Inner::E42P20); /// 42P19 - pub const INVALID_RECURSION: SqlState = 
SqlState(Cow::Borrowed("42P19")); + pub const INVALID_RECURSION: SqlState = SqlState(Inner::E42P19); /// 42830 - pub const INVALID_FOREIGN_KEY: SqlState = SqlState(Cow::Borrowed("42830")); + pub const INVALID_FOREIGN_KEY: SqlState = SqlState(Inner::E42830); /// 42602 - pub const INVALID_NAME: SqlState = SqlState(Cow::Borrowed("42602")); + pub const INVALID_NAME: SqlState = SqlState(Inner::E42602); /// 42622 - pub const NAME_TOO_LONG: SqlState = SqlState(Cow::Borrowed("42622")); + pub const NAME_TOO_LONG: SqlState = SqlState(Inner::E42622); /// 42939 - pub const RESERVED_NAME: SqlState = SqlState(Cow::Borrowed("42939")); + pub const RESERVED_NAME: SqlState = SqlState(Inner::E42939); /// 42804 - pub const DATATYPE_MISMATCH: SqlState = SqlState(Cow::Borrowed("42804")); + pub const DATATYPE_MISMATCH: SqlState = SqlState(Inner::E42804); /// 42P18 - pub const INDETERMINATE_DATATYPE: SqlState = SqlState(Cow::Borrowed("42P18")); + pub const INDETERMINATE_DATATYPE: SqlState = SqlState(Inner::E42P18); /// 42P21 - pub const COLLATION_MISMATCH: SqlState = SqlState(Cow::Borrowed("42P21")); + pub const COLLATION_MISMATCH: SqlState = SqlState(Inner::E42P21); /// 42P22 - pub const INDETERMINATE_COLLATION: SqlState = SqlState(Cow::Borrowed("42P22")); + pub const INDETERMINATE_COLLATION: SqlState = SqlState(Inner::E42P22); /// 42809 - pub const WRONG_OBJECT_TYPE: SqlState = SqlState(Cow::Borrowed("42809")); + pub const WRONG_OBJECT_TYPE: SqlState = SqlState(Inner::E42809); /// 428C9 - pub const GENERATED_ALWAYS: SqlState = SqlState(Cow::Borrowed("428C9")); + pub const GENERATED_ALWAYS: SqlState = SqlState(Inner::E428C9); /// 42703 - pub const UNDEFINED_COLUMN: SqlState = SqlState(Cow::Borrowed("42703")); + pub const UNDEFINED_COLUMN: SqlState = SqlState(Inner::E42703); /// 42883 - pub const UNDEFINED_FUNCTION: SqlState = SqlState(Cow::Borrowed("42883")); + pub const UNDEFINED_FUNCTION: SqlState = SqlState(Inner::E42883); /// 42P01 - pub const UNDEFINED_TABLE: SqlState = 
SqlState(Cow::Borrowed("42P01")); + pub const UNDEFINED_TABLE: SqlState = SqlState(Inner::E42P01); /// 42P02 - pub const UNDEFINED_PARAMETER: SqlState = SqlState(Cow::Borrowed("42P02")); + pub const UNDEFINED_PARAMETER: SqlState = SqlState(Inner::E42P02); /// 42704 - pub const UNDEFINED_OBJECT: SqlState = SqlState(Cow::Borrowed("42704")); + pub const UNDEFINED_OBJECT: SqlState = SqlState(Inner::E42704); /// 42701 - pub const DUPLICATE_COLUMN: SqlState = SqlState(Cow::Borrowed("42701")); + pub const DUPLICATE_COLUMN: SqlState = SqlState(Inner::E42701); /// 42P03 - pub const DUPLICATE_CURSOR: SqlState = SqlState(Cow::Borrowed("42P03")); + pub const DUPLICATE_CURSOR: SqlState = SqlState(Inner::E42P03); /// 42P04 - pub const DUPLICATE_DATABASE: SqlState = SqlState(Cow::Borrowed("42P04")); + pub const DUPLICATE_DATABASE: SqlState = SqlState(Inner::E42P04); /// 42723 - pub const DUPLICATE_FUNCTION: SqlState = SqlState(Cow::Borrowed("42723")); + pub const DUPLICATE_FUNCTION: SqlState = SqlState(Inner::E42723); /// 42P05 - pub const DUPLICATE_PSTATEMENT: SqlState = SqlState(Cow::Borrowed("42P05")); + pub const DUPLICATE_PSTATEMENT: SqlState = SqlState(Inner::E42P05); /// 42P06 - pub const DUPLICATE_SCHEMA: SqlState = SqlState(Cow::Borrowed("42P06")); + pub const DUPLICATE_SCHEMA: SqlState = SqlState(Inner::E42P06); /// 42P07 - pub const DUPLICATE_TABLE: SqlState = SqlState(Cow::Borrowed("42P07")); + pub const DUPLICATE_TABLE: SqlState = SqlState(Inner::E42P07); /// 42712 - pub const DUPLICATE_ALIAS: SqlState = SqlState(Cow::Borrowed("42712")); + pub const DUPLICATE_ALIAS: SqlState = SqlState(Inner::E42712); /// 42710 - pub const DUPLICATE_OBJECT: SqlState = SqlState(Cow::Borrowed("42710")); + pub const DUPLICATE_OBJECT: SqlState = SqlState(Inner::E42710); /// 42702 - pub const AMBIGUOUS_COLUMN: SqlState = SqlState(Cow::Borrowed("42702")); + pub const AMBIGUOUS_COLUMN: SqlState = SqlState(Inner::E42702); /// 42725 - pub const AMBIGUOUS_FUNCTION: SqlState = 
SqlState(Cow::Borrowed("42725")); + pub const AMBIGUOUS_FUNCTION: SqlState = SqlState(Inner::E42725); /// 42P08 - pub const AMBIGUOUS_PARAMETER: SqlState = SqlState(Cow::Borrowed("42P08")); + pub const AMBIGUOUS_PARAMETER: SqlState = SqlState(Inner::E42P08); /// 42P09 - pub const AMBIGUOUS_ALIAS: SqlState = SqlState(Cow::Borrowed("42P09")); + pub const AMBIGUOUS_ALIAS: SqlState = SqlState(Inner::E42P09); /// 42P10 - pub const INVALID_COLUMN_REFERENCE: SqlState = SqlState(Cow::Borrowed("42P10")); + pub const INVALID_COLUMN_REFERENCE: SqlState = SqlState(Inner::E42P10); /// 42611 - pub const INVALID_COLUMN_DEFINITION: SqlState = SqlState(Cow::Borrowed("42611")); + pub const INVALID_COLUMN_DEFINITION: SqlState = SqlState(Inner::E42611); /// 42P11 - pub const INVALID_CURSOR_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P11")); + pub const INVALID_CURSOR_DEFINITION: SqlState = SqlState(Inner::E42P11); /// 42P12 - pub const INVALID_DATABASE_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P12")); + pub const INVALID_DATABASE_DEFINITION: SqlState = SqlState(Inner::E42P12); /// 42P13 - pub const INVALID_FUNCTION_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P13")); + pub const INVALID_FUNCTION_DEFINITION: SqlState = SqlState(Inner::E42P13); /// 42P14 - pub const INVALID_PSTATEMENT_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P14")); + pub const INVALID_PSTATEMENT_DEFINITION: SqlState = SqlState(Inner::E42P14); /// 42P15 - pub const INVALID_SCHEMA_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P15")); + pub const INVALID_SCHEMA_DEFINITION: SqlState = SqlState(Inner::E42P15); /// 42P16 - pub const INVALID_TABLE_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P16")); + pub const INVALID_TABLE_DEFINITION: SqlState = SqlState(Inner::E42P16); /// 42P17 - pub const INVALID_OBJECT_DEFINITION: SqlState = SqlState(Cow::Borrowed("42P17")); + pub const INVALID_OBJECT_DEFINITION: SqlState = SqlState(Inner::E42P17); /// 44000 - pub const WITH_CHECK_OPTION_VIOLATION: 
SqlState = SqlState(Cow::Borrowed("44000")); + pub const WITH_CHECK_OPTION_VIOLATION: SqlState = SqlState(Inner::E44000); /// 53000 - pub const INSUFFICIENT_RESOURCES: SqlState = SqlState(Cow::Borrowed("53000")); + pub const INSUFFICIENT_RESOURCES: SqlState = SqlState(Inner::E53000); /// 53100 - pub const DISK_FULL: SqlState = SqlState(Cow::Borrowed("53100")); + pub const DISK_FULL: SqlState = SqlState(Inner::E53100); /// 53200 - pub const OUT_OF_MEMORY: SqlState = SqlState(Cow::Borrowed("53200")); + pub const OUT_OF_MEMORY: SqlState = SqlState(Inner::E53200); /// 53300 - pub const TOO_MANY_CONNECTIONS: SqlState = SqlState(Cow::Borrowed("53300")); + pub const TOO_MANY_CONNECTIONS: SqlState = SqlState(Inner::E53300); /// 53400 - pub const CONFIGURATION_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("53400")); + pub const CONFIGURATION_LIMIT_EXCEEDED: SqlState = SqlState(Inner::E53400); /// 54000 - pub const PROGRAM_LIMIT_EXCEEDED: SqlState = SqlState(Cow::Borrowed("54000")); + pub const PROGRAM_LIMIT_EXCEEDED: SqlState = SqlState(Inner::E54000); /// 54001 - pub const STATEMENT_TOO_COMPLEX: SqlState = SqlState(Cow::Borrowed("54001")); + pub const STATEMENT_TOO_COMPLEX: SqlState = SqlState(Inner::E54001); /// 54011 - pub const TOO_MANY_COLUMNS: SqlState = SqlState(Cow::Borrowed("54011")); + pub const TOO_MANY_COLUMNS: SqlState = SqlState(Inner::E54011); /// 54023 - pub const TOO_MANY_ARGUMENTS: SqlState = SqlState(Cow::Borrowed("54023")); + pub const TOO_MANY_ARGUMENTS: SqlState = SqlState(Inner::E54023); /// 55000 - pub const OBJECT_NOT_IN_PREREQUISITE_STATE: SqlState = SqlState(Cow::Borrowed("55000")); + pub const OBJECT_NOT_IN_PREREQUISITE_STATE: SqlState = SqlState(Inner::E55000); /// 55006 - pub const OBJECT_IN_USE: SqlState = SqlState(Cow::Borrowed("55006")); + pub const OBJECT_IN_USE: SqlState = SqlState(Inner::E55006); /// 55P02 - pub const CANT_CHANGE_RUNTIME_PARAM: SqlState = SqlState(Cow::Borrowed("55P02")); + pub const CANT_CHANGE_RUNTIME_PARAM: 
SqlState = SqlState(Inner::E55P02); /// 55P03 - pub const LOCK_NOT_AVAILABLE: SqlState = SqlState(Cow::Borrowed("55P03")); + pub const LOCK_NOT_AVAILABLE: SqlState = SqlState(Inner::E55P03); /// 55P04 - pub const UNSAFE_NEW_ENUM_VALUE_USAGE: SqlState = SqlState(Cow::Borrowed("55P04")); + pub const UNSAFE_NEW_ENUM_VALUE_USAGE: SqlState = SqlState(Inner::E55P04); /// 57000 - pub const OPERATOR_INTERVENTION: SqlState = SqlState(Cow::Borrowed("57000")); + pub const OPERATOR_INTERVENTION: SqlState = SqlState(Inner::E57000); /// 57014 - pub const QUERY_CANCELED: SqlState = SqlState(Cow::Borrowed("57014")); + pub const QUERY_CANCELED: SqlState = SqlState(Inner::E57014); /// 57P01 - pub const ADMIN_SHUTDOWN: SqlState = SqlState(Cow::Borrowed("57P01")); + pub const ADMIN_SHUTDOWN: SqlState = SqlState(Inner::E57P01); /// 57P02 - pub const CRASH_SHUTDOWN: SqlState = SqlState(Cow::Borrowed("57P02")); + pub const CRASH_SHUTDOWN: SqlState = SqlState(Inner::E57P02); /// 57P03 - pub const CANNOT_CONNECT_NOW: SqlState = SqlState(Cow::Borrowed("57P03")); + pub const CANNOT_CONNECT_NOW: SqlState = SqlState(Inner::E57P03); /// 57P04 - pub const DATABASE_DROPPED: SqlState = SqlState(Cow::Borrowed("57P04")); + pub const DATABASE_DROPPED: SqlState = SqlState(Inner::E57P04); + + /// 57P05 + pub const IDLE_SESSION_TIMEOUT: SqlState = SqlState(Inner::E57P05); /// 58000 - pub const SYSTEM_ERROR: SqlState = SqlState(Cow::Borrowed("58000")); + pub const SYSTEM_ERROR: SqlState = SqlState(Inner::E58000); /// 58030 - pub const IO_ERROR: SqlState = SqlState(Cow::Borrowed("58030")); + pub const IO_ERROR: SqlState = SqlState(Inner::E58030); /// 58P01 - pub const UNDEFINED_FILE: SqlState = SqlState(Cow::Borrowed("58P01")); + pub const UNDEFINED_FILE: SqlState = SqlState(Inner::E58P01); /// 58P02 - pub const DUPLICATE_FILE: SqlState = SqlState(Cow::Borrowed("58P02")); + pub const DUPLICATE_FILE: SqlState = SqlState(Inner::E58P02); /// 72000 - pub const SNAPSHOT_TOO_OLD: SqlState = 
SqlState(Cow::Borrowed("72000")); + pub const SNAPSHOT_TOO_OLD: SqlState = SqlState(Inner::E72000); /// F0000 - pub const CONFIG_FILE_ERROR: SqlState = SqlState(Cow::Borrowed("F0000")); + pub const CONFIG_FILE_ERROR: SqlState = SqlState(Inner::EF0000); /// F0001 - pub const LOCK_FILE_EXISTS: SqlState = SqlState(Cow::Borrowed("F0001")); + pub const LOCK_FILE_EXISTS: SqlState = SqlState(Inner::EF0001); /// HV000 - pub const FDW_ERROR: SqlState = SqlState(Cow::Borrowed("HV000")); + pub const FDW_ERROR: SqlState = SqlState(Inner::EHV000); /// HV005 - pub const FDW_COLUMN_NAME_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV005")); + pub const FDW_COLUMN_NAME_NOT_FOUND: SqlState = SqlState(Inner::EHV005); /// HV002 - pub const FDW_DYNAMIC_PARAMETER_VALUE_NEEDED: SqlState = SqlState(Cow::Borrowed("HV002")); + pub const FDW_DYNAMIC_PARAMETER_VALUE_NEEDED: SqlState = SqlState(Inner::EHV002); /// HV010 - pub const FDW_FUNCTION_SEQUENCE_ERROR: SqlState = SqlState(Cow::Borrowed("HV010")); + pub const FDW_FUNCTION_SEQUENCE_ERROR: SqlState = SqlState(Inner::EHV010); /// HV021 - pub const FDW_INCONSISTENT_DESCRIPTOR_INFORMATION: SqlState = SqlState(Cow::Borrowed("HV021")); + pub const FDW_INCONSISTENT_DESCRIPTOR_INFORMATION: SqlState = SqlState(Inner::EHV021); /// HV024 - pub const FDW_INVALID_ATTRIBUTE_VALUE: SqlState = SqlState(Cow::Borrowed("HV024")); + pub const FDW_INVALID_ATTRIBUTE_VALUE: SqlState = SqlState(Inner::EHV024); /// HV007 - pub const FDW_INVALID_COLUMN_NAME: SqlState = SqlState(Cow::Borrowed("HV007")); + pub const FDW_INVALID_COLUMN_NAME: SqlState = SqlState(Inner::EHV007); /// HV008 - pub const FDW_INVALID_COLUMN_NUMBER: SqlState = SqlState(Cow::Borrowed("HV008")); + pub const FDW_INVALID_COLUMN_NUMBER: SqlState = SqlState(Inner::EHV008); /// HV004 - pub const FDW_INVALID_DATA_TYPE: SqlState = SqlState(Cow::Borrowed("HV004")); + pub const FDW_INVALID_DATA_TYPE: SqlState = SqlState(Inner::EHV004); /// HV006 - pub const FDW_INVALID_DATA_TYPE_DESCRIPTORS: 
SqlState = SqlState(Cow::Borrowed("HV006")); + pub const FDW_INVALID_DATA_TYPE_DESCRIPTORS: SqlState = SqlState(Inner::EHV006); /// HV091 - pub const FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER: SqlState = SqlState(Cow::Borrowed("HV091")); + pub const FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER: SqlState = SqlState(Inner::EHV091); /// HV00B - pub const FDW_INVALID_HANDLE: SqlState = SqlState(Cow::Borrowed("HV00B")); + pub const FDW_INVALID_HANDLE: SqlState = SqlState(Inner::EHV00B); /// HV00C - pub const FDW_INVALID_OPTION_INDEX: SqlState = SqlState(Cow::Borrowed("HV00C")); + pub const FDW_INVALID_OPTION_INDEX: SqlState = SqlState(Inner::EHV00C); /// HV00D - pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Cow::Borrowed("HV00D")); + pub const FDW_INVALID_OPTION_NAME: SqlState = SqlState(Inner::EHV00D); /// HV090 - pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = - SqlState(Cow::Borrowed("HV090")); + pub const FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH: SqlState = SqlState(Inner::EHV090); /// HV00A - pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Cow::Borrowed("HV00A")); + pub const FDW_INVALID_STRING_FORMAT: SqlState = SqlState(Inner::EHV00A); /// HV009 - pub const FDW_INVALID_USE_OF_NULL_POINTER: SqlState = SqlState(Cow::Borrowed("HV009")); + pub const FDW_INVALID_USE_OF_NULL_POINTER: SqlState = SqlState(Inner::EHV009); /// HV014 - pub const FDW_TOO_MANY_HANDLES: SqlState = SqlState(Cow::Borrowed("HV014")); + pub const FDW_TOO_MANY_HANDLES: SqlState = SqlState(Inner::EHV014); /// HV001 - pub const FDW_OUT_OF_MEMORY: SqlState = SqlState(Cow::Borrowed("HV001")); + pub const FDW_OUT_OF_MEMORY: SqlState = SqlState(Inner::EHV001); /// HV00P - pub const FDW_NO_SCHEMAS: SqlState = SqlState(Cow::Borrowed("HV00P")); + pub const FDW_NO_SCHEMAS: SqlState = SqlState(Inner::EHV00P); /// HV00J - pub const FDW_OPTION_NAME_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00J")); + pub const FDW_OPTION_NAME_NOT_FOUND: SqlState = SqlState(Inner::EHV00J); /// 
HV00K - pub const FDW_REPLY_HANDLE: SqlState = SqlState(Cow::Borrowed("HV00K")); + pub const FDW_REPLY_HANDLE: SqlState = SqlState(Inner::EHV00K); /// HV00Q - pub const FDW_SCHEMA_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00Q")); + pub const FDW_SCHEMA_NOT_FOUND: SqlState = SqlState(Inner::EHV00Q); /// HV00R - pub const FDW_TABLE_NOT_FOUND: SqlState = SqlState(Cow::Borrowed("HV00R")); + pub const FDW_TABLE_NOT_FOUND: SqlState = SqlState(Inner::EHV00R); /// HV00L - pub const FDW_UNABLE_TO_CREATE_EXECUTION: SqlState = SqlState(Cow::Borrowed("HV00L")); + pub const FDW_UNABLE_TO_CREATE_EXECUTION: SqlState = SqlState(Inner::EHV00L); /// HV00M - pub const FDW_UNABLE_TO_CREATE_REPLY: SqlState = SqlState(Cow::Borrowed("HV00M")); + pub const FDW_UNABLE_TO_CREATE_REPLY: SqlState = SqlState(Inner::EHV00M); /// HV00N - pub const FDW_UNABLE_TO_ESTABLISH_CONNECTION: SqlState = SqlState(Cow::Borrowed("HV00N")); + pub const FDW_UNABLE_TO_ESTABLISH_CONNECTION: SqlState = SqlState(Inner::EHV00N); /// P0000 - pub const PLPGSQL_ERROR: SqlState = SqlState(Cow::Borrowed("P0000")); + pub const PLPGSQL_ERROR: SqlState = SqlState(Inner::EP0000); /// P0001 - pub const RAISE_EXCEPTION: SqlState = SqlState(Cow::Borrowed("P0001")); + pub const RAISE_EXCEPTION: SqlState = SqlState(Inner::EP0001); /// P0002 - pub const NO_DATA_FOUND: SqlState = SqlState(Cow::Borrowed("P0002")); + pub const NO_DATA_FOUND: SqlState = SqlState(Inner::EP0002); /// P0003 - pub const TOO_MANY_ROWS: SqlState = SqlState(Cow::Borrowed("P0003")); + pub const TOO_MANY_ROWS: SqlState = SqlState(Inner::EP0003); /// P0004 - pub const ASSERT_FAILURE: SqlState = SqlState(Cow::Borrowed("P0004")); + pub const ASSERT_FAILURE: SqlState = SqlState(Inner::EP0004); /// XX000 - pub const INTERNAL_ERROR: SqlState = SqlState(Cow::Borrowed("XX000")); + pub const INTERNAL_ERROR: SqlState = SqlState(Inner::EXX000); /// XX001 - pub const DATA_CORRUPTED: SqlState = SqlState(Cow::Borrowed("XX001")); + pub const DATA_CORRUPTED: SqlState = 
SqlState(Inner::EXX001); /// XX002 - pub const INDEX_CORRUPTED: SqlState = SqlState(Cow::Borrowed("XX002")); + pub const INDEX_CORRUPTED: SqlState = SqlState(Inner::EXX002); } + +#[derive(PartialEq, Eq, Clone, Debug)] +#[allow(clippy::upper_case_acronyms)] +enum Inner { + E00000, + E01000, + E0100C, + E01008, + E01003, + E01007, + E01006, + E01004, + E01P01, + E02000, + E02001, + E03000, + E08000, + E08003, + E08006, + E08001, + E08004, + E08007, + E08P01, + E09000, + E0A000, + E0B000, + E0F000, + E0F001, + E0L000, + E0LP01, + E0P000, + E0Z000, + E0Z002, + E20000, + E21000, + E22000, + E2202E, + E22021, + E22008, + E22012, + E22005, + E2200B, + E22022, + E22015, + E2201E, + E22014, + E22016, + E2201F, + E2201G, + E22018, + E22007, + E22019, + E2200D, + E22025, + E22P06, + E22010, + E22023, + E22013, + E2201B, + E2201W, + E2201X, + E2202H, + E2202G, + E22009, + E2200C, + E2200G, + E22004, + E22002, + E22003, + E2200H, + E22026, + E22001, + E22011, + E22027, + E22024, + E2200F, + E22P01, + E22P02, + E22P03, + E22P04, + E22P05, + E2200L, + E2200M, + E2200N, + E2200S, + E2200T, + E22030, + E22031, + E22032, + E22033, + E22034, + E22035, + E22036, + E22037, + E22038, + E22039, + E2203A, + E2203B, + E2203C, + E2203D, + E2203E, + E2203F, + E2203G, + E23000, + E23001, + E23502, + E23503, + E23505, + E23514, + E23P01, + E24000, + E25000, + E25001, + E25002, + E25008, + E25003, + E25004, + E25005, + E25006, + E25007, + E25P01, + E25P02, + E25P03, + E26000, + E27000, + E28000, + E28P01, + E2B000, + E2BP01, + E2D000, + E2F000, + E2F005, + E2F002, + E2F003, + E2F004, + E34000, + E38000, + E38001, + E38002, + E38003, + E38004, + E39000, + E39001, + E39004, + E39P01, + E39P02, + E39P03, + E3B000, + E3B001, + E3D000, + E3F000, + E40000, + E40002, + E40001, + E40003, + E40P01, + E42000, + E42601, + E42501, + E42846, + E42803, + E42P20, + E42P19, + E42830, + E42602, + E42622, + E42939, + E42804, + E42P18, + E42P21, + E42P22, + E42809, + E428C9, + E42703, + E42883, + E42P01, + 
E42P02, + E42704, + E42701, + E42P03, + E42P04, + E42723, + E42P05, + E42P06, + E42P07, + E42712, + E42710, + E42702, + E42725, + E42P08, + E42P09, + E42P10, + E42611, + E42P11, + E42P12, + E42P13, + E42P14, + E42P15, + E42P16, + E42P17, + E44000, + E53000, + E53100, + E53200, + E53300, + E53400, + E54000, + E54001, + E54011, + E54023, + E55000, + E55006, + E55P02, + E55P03, + E55P04, + E57000, + E57014, + E57P01, + E57P02, + E57P03, + E57P04, + E57P05, + E58000, + E58030, + E58P01, + E58P02, + E72000, + EF0000, + EF0001, + EHV000, + EHV005, + EHV002, + EHV010, + EHV021, + EHV024, + EHV007, + EHV008, + EHV004, + EHV006, + EHV091, + EHV00B, + EHV00C, + EHV00D, + EHV090, + EHV00A, + EHV009, + EHV014, + EHV001, + EHV00P, + EHV00J, + EHV00K, + EHV00Q, + EHV00R, + EHV00L, + EHV00M, + EHV00N, + EP0000, + EP0001, + EP0002, + EP0003, + EP0004, + EXX000, + EXX001, + EXX002, + Other(Box), +} + #[rustfmt::skip] static SQLSTATE_MAP: phf::Map<&'static str, SqlState> = ::phf::Map { - key: 3213172566270843353, - disps: ::phf::Slice::Static(&[ - (3, 46), - (0, 6), - (0, 39), - (0, 0), - (0, 192), - (0, 49), - (0, 17), - (1, 138), - (0, 2), - (0, 117), - (0, 0), - (0, 33), - (16, 241), - (0, 20), - (2, 148), - (0, 0), - (0, 1), - (1, 3), - (0, 27), - (0, 21), - (1, 75), - (13, 187), - (0, 3), - (0, 42), + key: 12913932095322966823, + disps: &[ + (0, 24), (0, 12), - (0, 82), - (3, 253), - (0, 219), - (0, 6), - (4, 206), - (2, 16), - (5, 67), - (3, 15), - (0, 76), - (0, 57), - (5, 203), - (22, 134), - (1, 27), + (0, 74), + (0, 109), + (0, 11), + (0, 9), (0, 0), - (1, 113), + (4, 38), + (3, 155), + (0, 6), + (1, 242), + (0, 66), + (0, 53), + (5, 180), + (3, 221), + (7, 230), + (0, 125), + (1, 46), + (0, 11), + (1, 2), + (0, 5), + (0, 13), + (0, 171), + (0, 15), + (0, 4), + (0, 22), + (1, 85), + (0, 75), + (2, 0), + (1, 25), + (7, 47), + (0, 45), + (0, 35), + (0, 7), + (7, 124), (0, 0), + (14, 104), + (1, 183), + (61, 50), + (3, 76), + (0, 12), + (0, 7), + (4, 189), + (0, 1), + (64, 
102), (0, 0), - (5, 11), - (0, 45), - (0, 62), - (0, 26), - (1, 158), - (21, 1), - (0, 4), - (5, 64), - (0, 77), - (1, 189), - ]), - entries: ::phf::Slice::Static(&[ - ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("XX002", SqlState::INDEX_CORRUPTED), - ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + (16, 192), + (24, 19), + (0, 5), + (0, 87), + (0, 89), + (0, 14), + ], + entries: &[ + ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), + ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), + ("42501", SqlState::INSUFFICIENT_PRIVILEGE), + ("22000", SqlState::DATA_EXCEPTION), + ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), + ("2200N", SqlState::INVALID_XML_CONTENT), + ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("28P01", SqlState::INVALID_PASSWORD), + ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), + ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), + ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), + ("42P09", SqlState::AMBIGUOUS_ALIAS), + ("F0000", SqlState::CONFIG_FILE_ERROR), + ("42P18", SqlState::INDETERMINATE_DATATYPE), + ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), + ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), ("42P08", SqlState::AMBIGUOUS_PARAMETER), - ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), - ("2202E", SqlState::ARRAY_ELEMENT_ERROR), - ("25008", SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), - ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), - ("58P01", SqlState::UNDEFINED_FILE), - ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), - ("42P05", SqlState::DUPLICATE_PSTATEMENT), - ("P0001", SqlState::RAISE_EXCEPTION), - ("08P01", SqlState::PROTOCOL_VIOLATION), - ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), - ("HV014", SqlState::FDW_TOO_MANY_HANDLES), - ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("53200", SqlState::OUT_OF_MEMORY), - ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), - ("25000", SqlState::INVALID_TRANSACTION_STATE), - ("27000", 
SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), - ("42602", SqlState::INVALID_NAME), ("08000", SqlState::CONNECTION_EXCEPTION), - ("57P03", SqlState::CANNOT_CONNECT_NOW), + ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), + ("22024", SqlState::UNTERMINATED_C_STRING), + ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), + ("25001", SqlState::ACTIVE_SQL_TRANSACTION), + ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), + ("42710", SqlState::DUPLICATE_OBJECT), ("2D000", SqlState::INVALID_TRANSACTION_TERMINATION), - ("3B001", SqlState::S_E_INVALID_SPECIFICATION), - ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), - ("42501", SqlState::INSUFFICIENT_PRIVILEGE), - ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), - ("2203D", SqlState::TOO_MANY_JSON_ARRAY_ELEMENTS), - ("P0003", SqlState::TOO_MANY_ROWS), - ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), - ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), - ("23505", SqlState::UNIQUE_VIOLATION), - ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), - ("22019", SqlState::INVALID_ESCAPE_CHARACTER), - ("2200S", SqlState::INVALID_XML_COMMENT), - ("22030", SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), ("2200G", SqlState::MOST_SPECIFIC_TYPE_MISMATCH), - ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), - ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), - ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), - ("0LP01", SqlState::INVALID_GRANT_OPERATION), - ("34000", SqlState::INVALID_CURSOR_NAME), - ("57P02", SqlState::CRASH_SHUTDOWN), + ("22022", SqlState::INDICATOR_OVERFLOW), + ("55006", SqlState::OBJECT_IN_USE), + ("53200", SqlState::OUT_OF_MEMORY), ("22012", SqlState::DIVISION_BY_ZERO), - ("42723", SqlState::DUPLICATE_FUNCTION), + ("P0002", SqlState::NO_DATA_FOUND), + ("XX001", SqlState::DATA_CORRUPTED), + ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), + ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), + ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), + 
("25000", SqlState::INVALID_TRANSACTION_STATE), + ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), + ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), ("22004", SqlState::NULL_VALUE_NOT_ALLOWED), - ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), - ("57000", SqlState::OPERATOR_INTERVENTION), - ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), - ("HV00P", SqlState::FDW_NO_SCHEMAS), - ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), - ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - ("23001", SqlState::RESTRICT_VIOLATION), - ("23514", SqlState::CHECK_VIOLATION), - ("42939", SqlState::RESERVED_NAME), - ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), - ("HV00K", SqlState::FDW_REPLY_HANDLE), - ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), - ("53100", SqlState::DISK_FULL), - ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), - ("03000", SqlState::SQL_STATEMENT_NOT_YET_COMPLETE), - ("72000", SqlState::SNAPSHOT_TOO_OLD), - ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("42804", SqlState::DATATYPE_MISMATCH), + ("42803", SqlState::GROUPING_ERROR), + ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), ("25002", SqlState::BRANCH_TRANSACTION_ALREADY_ACTIVE), - ("40002", SqlState::T_R_INTEGRITY_CONSTRAINT_VIOLATION), - ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), - ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), - ("2200L", SqlState::NOT_AN_XML_DOCUMENT), - ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), - ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), - ("22007", SqlState::INVALID_DATETIME_FORMAT), - ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), - ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), - ("42P10", SqlState::INVALID_COLUMN_REFERENCE), - ("2200D", SqlState::INVALID_ESCAPE_OCTET), - ("HV004", SqlState::FDW_INVALID_DATA_TYPE), - ("22005", SqlState::ERROR_IN_ASSIGNMENT), - ("P0002", SqlState::NO_DATA_FOUND), - ("22036", 
SqlState::NON_NUMERIC_SQL_JSON_ITEM), - ("58030", SqlState::IO_ERROR), - ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), - ("F0001", SqlState::LOCK_FILE_EXISTS), - ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), - ("01000", SqlState::WARNING), - ("22032", SqlState::INVALID_JSON_TEXT), + ("28000", SqlState::INVALID_AUTHORIZATION_SPECIFICATION), + ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), + ("22P01", SqlState::FLOATING_POINT_EXCEPTION), ("2B000", SqlState::DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST), - ("40003", SqlState::T_R_STATEMENT_COMPLETION_UNKNOWN), - ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), - ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("42723", SqlState::DUPLICATE_FUNCTION), + ("21000", SqlState::CARDINALITY_VIOLATION), + ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), + ("23505", SqlState::UNIQUE_VIOLATION), + ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), + ("23P01", SqlState::EXCLUSION_VIOLATION), ("39P03", SqlState::E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED), - ("42846", SqlState::CANNOT_COERCE), - ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), - ("23503", SqlState::FOREIGN_KEY_VIOLATION), - ("57P01", SqlState::ADMIN_SHUTDOWN), + ("42P10", SqlState::INVALID_COLUMN_REFERENCE), + ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), ("55P04", SqlState::UNSAFE_NEW_ENUM_VALUE_USAGE), - ("42P19", SqlState::INVALID_RECURSION), - ("53300", SqlState::TOO_MANY_CONNECTIONS), - ("42804", SqlState::DATATYPE_MISMATCH), - ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("P0000", SqlState::PLPGSQL_ERROR), + ("2F005", SqlState::S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT), + ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), ("0A000", SqlState::FEATURE_NOT_SUPPORTED), - ("0F000", SqlState::LOCATOR_EXCEPTION), - ("42710", SqlState::DUPLICATE_OBJECT), - ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), + ("24000", SqlState::INVALID_CURSOR_STATE), + ("25008", 
SqlState::HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL), + ("01003", SqlState::WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION), + ("42712", SqlState::DUPLICATE_ALIAS), + ("HV014", SqlState::FDW_TOO_MANY_HANDLES), + ("58030", SqlState::IO_ERROR), + ("2201W", SqlState::INVALID_ROW_COUNT_IN_LIMIT_CLAUSE), + ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), + ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), + ("HV005", SqlState::FDW_COLUMN_NAME_NOT_FOUND), + ("25004", SqlState::INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION), + ("54000", SqlState::PROGRAM_LIMIT_EXCEEDED), + ("20000", SqlState::CASE_NOT_FOUND), + ("2203G", SqlState::SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE), + ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), + ("22007", SqlState::INVALID_DATETIME_FORMAT), + ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), + ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), + ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), ("P0004", SqlState::ASSERT_FAILURE), - ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), - ("42P17", SqlState::INVALID_OBJECT_DEFINITION), - ("XX000", SqlState::INTERNAL_ERROR), - ("22009", SqlState::INVALID_TIME_ZONE_DISPLACEMENT_VALUE), - ("HV00J", SqlState::FDW_OPTION_NAME_NOT_FOUND), - ("42P11", SqlState::INVALID_CURSOR_DEFINITION), - ("25P01", SqlState::NO_ACTIVE_SQL_TRANSACTION), - ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), - ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), - ("22P01", SqlState::FLOATING_POINT_EXCEPTION), - ("2F000", SqlState::SQL_ROUTINE_EXCEPTION), - ("21000", SqlState::CARDINALITY_VIOLATION), - ("40001", SqlState::T_R_SERIALIZATION_FAILURE), + ("22018", SqlState::INVALID_CHARACTER_VALUE_FOR_CAST), + ("0L000", SqlState::INVALID_GRANTOR), + ("22P04", SqlState::BAD_COPY_FILE_FORMAT), + ("22031", SqlState::INVALID_ARGUMENT_FOR_SQL_JSON_DATETIME_FUNCTION), ("01P01", SqlState::WARNING_DEPRECATED_FEATURE), - ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), - ("42P12", 
SqlState::INVALID_DATABASE_DEFINITION), - ("42704", SqlState::UNDEFINED_OBJECT), - ("42P04", SqlState::DUPLICATE_DATABASE), - ("HV000", SqlState::FDW_ERROR), - ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), - ("HV00M", SqlState::FDW_UNABLE_TO_CREATE_REPLY), - ("42701", SqlState::DUPLICATE_COLUMN), - ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), - ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), - ("XX001", SqlState::DATA_CORRUPTED), - ("22038", SqlState::SINGLETON_SQL_JSON_ITEM_REQUIRED), + ("0LP01", SqlState::INVALID_GRANT_OPERATION), ("58P02", SqlState::DUPLICATE_FILE), - ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), - ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), - ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - ("HV00B", SqlState::FDW_INVALID_HANDLE), - ("54011", SqlState::TOO_MANY_COLUMNS), - ("0Z002", SqlState::STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER), - ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), - ("23P01", SqlState::EXCLUSION_VIOLATION), - ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), - ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), - ("HV009", SqlState::FDW_INVALID_USE_OF_NULL_POINTER), - ("22035", SqlState::NO_SQL_JSON_ITEM), - ("HV001", SqlState::FDW_OUT_OF_MEMORY), - ("3F000", SqlState::INVALID_SCHEMA_NAME), - ("0B000", SqlState::INVALID_TRANSACTION_INITIATION), - ("42830", SqlState::INVALID_FOREIGN_KEY), - ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), - ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), - ("54023", SqlState::TOO_MANY_ARGUMENTS), - ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), - ("2200H", SqlState::SEQUENCE_GENERATOR_LIMIT_EXCEEDED), - ("428C9", SqlState::GENERATED_ALWAYS), - ("53000", SqlState::INSUFFICIENT_RESOURCES), - ("42P09", SqlState::AMBIGUOUS_ALIAS), - ("08006", SqlState::CONNECTION_FAILURE), - ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), + ("26000", SqlState::INVALID_SQL_STATEMENT_NAME), ("54001", SqlState::STATEMENT_TOO_COMPLEX), 
- ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), - ("23502", SqlState::NOT_NULL_VIOLATION), + ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), + ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), ("22008", SqlState::DATETIME_FIELD_OVERFLOW), - ("F0000", SqlState::CONFIG_FILE_ERROR), - ("3B000", SqlState::SAVEPOINT_EXCEPTION), - ("2BP01", SqlState::DEPENDENT_OBJECTS_STILL_EXIST), - ("2202H", SqlState::INVALID_TABLESAMPLE_ARGUMENT), - ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), - ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), - ("57014", SqlState::QUERY_CANCELED), - ("55000", SqlState::OBJECT_NOT_IN_PREREQUISITE_STATE), - ("40000", SqlState::TRANSACTION_ROLLBACK), - ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), - ("22033", SqlState::INVALID_SQL_JSON_SUBSCRIPT), + ("42P06", SqlState::DUPLICATE_SCHEMA), + ("25007", SqlState::SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED), + ("42P20", SqlState::WINDOWING_ERROR), + ("HV091", SqlState::FDW_INVALID_DESCRIPTOR_FIELD_IDENTIFIER), + ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), + ("42702", SqlState::AMBIGUOUS_COLUMN), ("02000", SqlState::NO_DATA), - ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), - ("3D000", SqlState::INVALID_CATALOG_NAME), - ("2200M", SqlState::INVALID_XML_DOCUMENT), - ("42611", SqlState::INVALID_COLUMN_DEFINITION), - ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), - ("22P05", SqlState::UNTRANSLATABLE_CHARACTER), - ("42883", SqlState::UNDEFINED_FUNCTION), - ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("54011", SqlState::TOO_MANY_COLUMNS), + ("HV004", SqlState::FDW_INVALID_DATA_TYPE), + ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), + ("42701", SqlState::DUPLICATE_COLUMN), + ("08P01", SqlState::PROTOCOL_VIOLATION), + ("42622", SqlState::NAME_TOO_LONG), + ("P0003", SqlState::TOO_MANY_ROWS), + ("22003", SqlState::NUMERIC_VALUE_OUT_OF_RANGE), + ("42P03", SqlState::DUPLICATE_CURSOR), + ("23001", 
SqlState::RESTRICT_VIOLATION), + ("57000", SqlState::OPERATOR_INTERVENTION), ("22027", SqlState::TRIM_ERROR), - ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - ("0L000", SqlState::INVALID_GRANTOR), - ("42725", SqlState::AMBIGUOUS_FUNCTION), - ("42601", SqlState::SYNTAX_ERROR), - ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), - ("42702", SqlState::AMBIGUOUS_COLUMN), - ("22024", SqlState::UNTERMINATED_C_STRING), - ("22023", SqlState::INVALID_PARAMETER_VALUE), - ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), - ("22010", SqlState::INVALID_INDICATOR_PARAMETER_VALUE), - ("42P16", SqlState::INVALID_TABLE_DEFINITION), - ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), - ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), - ("55006", SqlState::OBJECT_IN_USE), - ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), - ("42803", SqlState::GROUPING_ERROR), - ("22021", SqlState::CHARACTER_NOT_IN_REPERTOIRE), - ("08004", SqlState::SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION), - ("42P15", SqlState::INVALID_SCHEMA_DEFINITION), - ("25006", SqlState::READ_ONLY_SQL_TRANSACTION), - ("42P02", SqlState::UNDEFINED_PARAMETER), - ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), - ("42P22", SqlState::INDETERMINATE_COLLATION), - ("0F001", SqlState::L_E_INVALID_SPECIFICATION), + ("42P12", SqlState::INVALID_DATABASE_DEFINITION), + ("3B000", SqlState::SAVEPOINT_EXCEPTION), ("2201B", SqlState::INVALID_REGULAR_EXPRESSION), - ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), - ("42P07", SqlState::DUPLICATE_TABLE), - ("02001", SqlState::NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED), - ("58000", SqlState::SYSTEM_ERROR), - ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), - ("42622", SqlState::NAME_TOO_LONG), - ("20000", SqlState::CASE_NOT_FOUND), - ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), - ("22022", SqlState::INDICATOR_OVERFLOW), - ("42P18", SqlState::INDETERMINATE_DATATYPE), - ("01008", SqlState::WARNING_IMPLICIT_ZERO_BIT_PADDING), + ("22030", 
SqlState::DUPLICATE_JSON_OBJECT_KEY_VALUE), + ("2F004", SqlState::S_R_E_READING_SQL_DATA_NOT_PERMITTED), + ("428C9", SqlState::GENERATED_ALWAYS), + ("2200S", SqlState::INVALID_XML_COMMENT), + ("22039", SqlState::SQL_JSON_ARRAY_NOT_FOUND), + ("42809", SqlState::WRONG_OBJECT_TYPE), + ("2201X", SqlState::INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE), ("39001", SqlState::E_R_I_E_INVALID_SQLSTATE_RETURNED), - ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), - ("2200N", SqlState::INVALID_XML_CONTENT), - ("42P01", SqlState::UNDEFINED_TABLE), - ("42P03", SqlState::DUPLICATE_CURSOR), - ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("25P02", SqlState::IN_FAILED_SQL_TRANSACTION), + ("0P000", SqlState::INVALID_ROLE_SPECIFICATION), + ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), + ("53100", SqlState::DISK_FULL), + ("42601", SqlState::SYNTAX_ERROR), + ("23000", SqlState::INTEGRITY_CONSTRAINT_VIOLATION), + ("HV006", SqlState::FDW_INVALID_DATA_TYPE_DESCRIPTORS), + ("HV00B", SqlState::FDW_INVALID_HANDLE), + ("HV00Q", SqlState::FDW_SCHEMA_NOT_FOUND), + ("01000", SqlState::WARNING), + ("42883", SqlState::UNDEFINED_FUNCTION), + ("57P01", SqlState::ADMIN_SHUTDOWN), ("22037", SqlState::NON_UNIQUE_KEYS_IN_A_JSON_OBJECT), - ("22000", SqlState::DATA_EXCEPTION), - ("28P01", SqlState::INVALID_PASSWORD), - ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), - ("42P06", SqlState::DUPLICATE_SCHEMA), - ("HV00D", SqlState::FDW_INVALID_OPTION_NAME), + ("00000", SqlState::SUCCESSFUL_COMPLETION), ("55P03", SqlState::LOCK_NOT_AVAILABLE), - ("HV021", SqlState::FDW_INCONSISTENT_DESCRIPTOR_INFORMATION), - ("42712", SqlState::DUPLICATE_ALIAS), - ("38000", SqlState::EXTERNAL_ROUTINE_EXCEPTION), - ("HV00N", SqlState::FDW_UNABLE_TO_ESTABLISH_CONNECTION), - ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), + ("42P01", SqlState::UNDEFINED_TABLE), + ("42830", SqlState::INVALID_FOREIGN_KEY), + ("22005", SqlState::ERROR_IN_ASSIGNMENT), + ("22025", SqlState::INVALID_ESCAPE_SEQUENCE), 
+ ("XX002", SqlState::INDEX_CORRUPTED), + ("42P16", SqlState::INVALID_TABLE_DEFINITION), + ("55P02", SqlState::CANT_CHANGE_RUNTIME_PARAM), + ("22019", SqlState::INVALID_ESCAPE_CHARACTER), + ("P0001", SqlState::RAISE_EXCEPTION), + ("72000", SqlState::SNAPSHOT_TOO_OLD), + ("42P11", SqlState::INVALID_CURSOR_DEFINITION), + ("40P01", SqlState::T_R_DEADLOCK_DETECTED), + ("57P02", SqlState::CRASH_SHUTDOWN), + ("HV00A", SqlState::FDW_INVALID_STRING_FORMAT), + ("2F002", SqlState::S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("23503", SqlState::FOREIGN_KEY_VIOLATION), + ("40000", SqlState::TRANSACTION_ROLLBACK), + ("22032", SqlState::INVALID_JSON_TEXT), + ("2202E", SqlState::ARRAY_ELEMENT_ERROR), + ("42P19", SqlState::INVALID_RECURSION), + ("42611", SqlState::INVALID_COLUMN_DEFINITION), + ("42P13", SqlState::INVALID_FUNCTION_DEFINITION), + ("25003", SqlState::INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION), + ("39P02", SqlState::E_R_I_E_SRF_PROTOCOL_VIOLATED), + ("XX000", SqlState::INTERNAL_ERROR), + ("08006", SqlState::CONNECTION_FAILURE), + ("57P04", SqlState::DATABASE_DROPPED), + ("42P07", SqlState::DUPLICATE_TABLE), + ("22P03", SqlState::INVALID_BINARY_REPRESENTATION), + ("22035", SqlState::NO_SQL_JSON_ITEM), + ("42P14", SqlState::INVALID_PSTATEMENT_DEFINITION), + ("01007", SqlState::WARNING_PRIVILEGE_NOT_GRANTED), + ("38004", SqlState::E_R_E_READING_SQL_DATA_NOT_PERMITTED), ("42P21", SqlState::COLLATION_MISMATCH), + ("0Z000", SqlState::DIAGNOSTICS_EXCEPTION), + ("HV001", SqlState::FDW_OUT_OF_MEMORY), + ("0F000", SqlState::LOCATOR_EXCEPTION), + ("22013", SqlState::INVALID_PRECEDING_OR_FOLLOWING_SIZE), + ("2201E", SqlState::INVALID_ARGUMENT_FOR_LOG), + ("22011", SqlState::SUBSTRING_ERROR), + ("42602", SqlState::INVALID_NAME), + ("01004", SqlState::WARNING_STRING_DATA_RIGHT_TRUNCATION), + ("42P02", SqlState::UNDEFINED_PARAMETER), + ("2203C", SqlState::SQL_JSON_OBJECT_NOT_FOUND), + ("HV002", SqlState::FDW_DYNAMIC_PARAMETER_VALUE_NEEDED), + ("0F001", 
SqlState::L_E_INVALID_SPECIFICATION), + ("58P01", SqlState::UNDEFINED_FILE), + ("38001", SqlState::E_R_E_CONTAINING_SQL_NOT_PERMITTED), ("42703", SqlState::UNDEFINED_COLUMN), - ("57P04", SqlState::DATABASE_DROPPED), - ("22P04", SqlState::BAD_COPY_FILE_FORMAT), - ("01006", SqlState::WARNING_PRIVILEGE_NOT_REVOKED), + ("57P05", SqlState::IDLE_SESSION_TIMEOUT), + ("57P03", SqlState::CANNOT_CONNECT_NOW), ("HV007", SqlState::FDW_INVALID_COLUMN_NAME), - ("HV00C", SqlState::FDW_INVALID_OPTION_INDEX), - ("25001", SqlState::ACTIVE_SQL_TRANSACTION), - ("42809", SqlState::WRONG_OBJECT_TYPE), + ("22014", SqlState::INVALID_ARGUMENT_FOR_NTILE), + ("22P06", SqlState::NONSTANDARD_USE_OF_ESCAPE_CHARACTER), + ("2203F", SqlState::SQL_JSON_SCALAR_REQUIRED), + ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), + ("09000", SqlState::TRIGGERED_ACTION_EXCEPTION), + ("2201F", SqlState::INVALID_ARGUMENT_FOR_POWER_FUNCTION), + ("08003", SqlState::CONNECTION_DOES_NOT_EXIST), + ("38002", SqlState::E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED), + ("F0001", SqlState::LOCK_FILE_EXISTS), + ("42P22", SqlState::INDETERMINATE_COLLATION), + ("2200C", SqlState::INVALID_USE_OF_ESCAPE_CHARACTER), + ("2203E", SqlState::TOO_MANY_JSON_OBJECT_MEMBERS), + ("23514", SqlState::CHECK_VIOLATION), ("22P02", SqlState::INVALID_TEXT_REPRESENTATION), - ("42P20", SqlState::WINDOWING_ERROR), - ("24000", SqlState::INVALID_CURSOR_STATE), - ("22011", SqlState::SUBSTRING_ERROR), - ("00000", SqlState::SUCCESSFUL_COMPLETION), - ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), - ("P0000", SqlState::PLPGSQL_ERROR), + ("54023", SqlState::TOO_MANY_ARGUMENTS), + ("2200T", SqlState::INVALID_XML_PROCESSING_INSTRUCTION), + ("22016", SqlState::INVALID_ARGUMENT_FOR_NTH_VALUE), + ("25P03", SqlState::IDLE_IN_TRANSACTION_SESSION_TIMEOUT), + ("3B001", SqlState::S_E_INVALID_SPECIFICATION), + ("08001", SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION), + ("22036", SqlState::NON_NUMERIC_SQL_JSON_ITEM), + ("3F000", 
SqlState::INVALID_SCHEMA_NAME), + ("39P01", SqlState::E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + ("22026", SqlState::STRING_DATA_LENGTH_MISMATCH), + ("42P17", SqlState::INVALID_OBJECT_DEFINITION), + ("22034", SqlState::MORE_THAN_ONE_SQL_JSON_ITEM), + ("HV000", SqlState::FDW_ERROR), + ("2200B", SqlState::ESCAPE_CHARACTER_CONFLICT), + ("HV008", SqlState::FDW_INVALID_COLUMN_NUMBER), + ("34000", SqlState::INVALID_CURSOR_NAME), + ("2201G", SqlState::INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + ("44000", SqlState::WITH_CHECK_OPTION_VIOLATION), + ("HV010", SqlState::FDW_FUNCTION_SEQUENCE_ERROR), + ("39004", SqlState::E_R_I_E_NULL_VALUE_NOT_ALLOWED), + ("22001", SqlState::STRING_DATA_RIGHT_TRUNCATION), + ("3D000", SqlState::INVALID_CATALOG_NAME), + ("25005", SqlState::NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION), + ("2200L", SqlState::NOT_AN_XML_DOCUMENT), + ("27000", SqlState::TRIGGERED_DATA_CHANGE_VIOLATION), + ("HV090", SqlState::FDW_INVALID_STRING_LENGTH_OR_BUFFER_LENGTH), + ("42939", SqlState::RESERVED_NAME), + ("58000", SqlState::SYSTEM_ERROR), + ("2200M", SqlState::INVALID_XML_DOCUMENT), + ("HV00L", SqlState::FDW_UNABLE_TO_CREATE_EXECUTION), + ("57014", SqlState::QUERY_CANCELED), + ("23502", SqlState::NOT_NULL_VIOLATION), + ("22002", SqlState::NULL_VALUE_NO_INDICATOR_PARAMETER), + ("HV00R", SqlState::FDW_TABLE_NOT_FOUND), + ("HV00P", SqlState::FDW_NO_SCHEMAS), ("38003", SqlState::E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), - ("0100C", SqlState::WARNING_DYNAMIC_RESULT_SETS_RETURNED), - ("2200F", SqlState::ZERO_LENGTH_CHARACTER_STRING), - ("40P01", SqlState::T_R_DEADLOCK_DETECTED), - ]), + ("39000", SqlState::EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + ("22015", SqlState::INTERVAL_FIELD_OVERFLOW), + ("HV00K", SqlState::FDW_REPLY_HANDLE), + ("HV024", SqlState::FDW_INVALID_ATTRIBUTE_VALUE), + ("2200D", SqlState::INVALID_ESCAPE_OCTET), + ("08007", SqlState::TRANSACTION_RESOLUTION_UNKNOWN), + ("2F003", SqlState::S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED), + ("42725", 
SqlState::AMBIGUOUS_FUNCTION), + ("2203A", SqlState::SQL_JSON_MEMBER_NOT_FOUND), + ("42846", SqlState::CANNOT_COERCE), + ("42P04", SqlState::DUPLICATE_DATABASE), + ("42000", SqlState::SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION), + ("2203B", SqlState::SQL_JSON_NUMBER_NOT_FOUND), + ("42P05", SqlState::DUPLICATE_PSTATEMENT), + ("53300", SqlState::TOO_MANY_CONNECTIONS), + ("53400", SqlState::CONFIGURATION_LIMIT_EXCEEDED), + ("42704", SqlState::UNDEFINED_OBJECT), + ("2202G", SqlState::INVALID_TABLESAMPLE_REPEAT), + ("22023", SqlState::INVALID_PARAMETER_VALUE), + ("53000", SqlState::INSUFFICIENT_RESOURCES), + ], }; diff --git a/tokio-postgres/src/generic_client.rs b/tokio-postgres/src/generic_client.rs index 30351bd0a..dcda147b5 100644 --- a/tokio-postgres/src/generic_client.rs +++ b/tokio-postgres/src/generic_client.rs @@ -1,6 +1,6 @@ use crate::query::RowStream; -use crate::types::{ToSql, Type}; -use crate::{Client, Error, Row, Statement, ToStatement, Transaction}; +use crate::types::{BorrowToSql, ToSql, Type}; +use crate::{Client, Error, Row, SimpleQueryMessage, Statement, ToStatement, Transaction}; use async_trait::async_trait; mod private { @@ -12,24 +12,25 @@ mod private { /// This trait is "sealed", and cannot be implemented outside of this crate. #[async_trait] pub trait GenericClient: private::Sealed { - /// Like `Client::execute`. + /// Like [`Client::execute`]. async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where T: ?Sized + ToStatement + Sync + Send; - /// Like `Client::execute_raw`. - async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + /// Like [`Client::execute_raw`]. + async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator; - /// Like `Client::query`. + /// Like [`Client::query`]. 
async fn query(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result, Error> where T: ?Sized + ToStatement + Sync + Send; - /// Like `Client::query_one`. + /// Like [`Client::query_one`]. async fn query_one( &self, statement: &T, @@ -38,7 +39,7 @@ pub trait GenericClient: private::Sealed { where T: ?Sized + ToStatement + Sync + Send; - /// Like `Client::query_opt`. + /// Like [`Client::query_opt`]. async fn query_opt( &self, statement: &T, @@ -47,25 +48,48 @@ pub trait GenericClient: private::Sealed { where T: ?Sized + ToStatement + Sync + Send; - /// Like `Client::query_raw`. - async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + /// Like [`Client::query_raw`]. + async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator; - /// Like `Client::prepare`. + /// Like [`Client::query_typed`] + async fn query_typed( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error>; + + /// Like [`Client::query_typed_raw`] + async fn query_typed_raw(&self, statement: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator + Sync + Send; + + /// Like [`Client::prepare`]. async fn prepare(&self, query: &str) -> Result; - /// Like `Client::prepare_typed`. + /// Like [`Client::prepare_typed`]. async fn prepare_typed( &self, query: &str, parameter_types: &[Type], ) -> Result; - /// Like `Client::transaction`. - async fn transaction(&mut self) -> Result, Error>; + /// Like [`Client::transaction`]. + async fn transaction<'a>(&'a mut self) -> Result, Error>; + + /// Like [`Client::batch_execute`]. + async fn batch_execute(&self, query: &str) -> Result<(), Error>; + + /// Like [`Client::simple_query`]. + async fn simple_query(&self, query: &str) -> Result, Error>; + + /// Returns a reference to the underlying [`Client`]. 
+ fn client(&self) -> &Client; } impl private::Sealed for Client {} @@ -79,10 +103,11 @@ impl GenericClient for Client { self.execute(query, params).await } - async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator, { self.execute_raw(statement, params).await @@ -117,15 +142,32 @@ impl GenericClient for Client { self.query_opt(statement, params).await } - async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator, { self.query_raw(statement, params).await } + async fn query_typed( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_typed(statement, params).await + } + + async fn query_typed_raw(&self, statement: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator + Sync + Send, + { + self.query_typed_raw(statement, params).await + } + async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } @@ -138,14 +180,27 @@ impl GenericClient for Client { self.prepare_typed(query, parameter_types).await } - async fn transaction(&mut self) -> Result, Error> { + async fn transaction<'a>(&'a mut self) -> Result, Error> { self.transaction().await } + + async fn batch_execute(&self, query: &str) -> Result<(), Error> { + self.batch_execute(query).await + } + + async fn simple_query(&self, query: &str) -> Result, Error> { + self.simple_query(query).await + } + + fn client(&self) -> &Client { + self + } } impl private::Sealed for Transaction<'_> {} #[async_trait] +#[allow(clippy::needless_lifetimes)] impl GenericClient 
for Transaction<'_> { async fn execute(&self, query: &T, params: &[&(dyn ToSql + Sync)]) -> Result where @@ -154,10 +209,11 @@ impl GenericClient for Transaction<'_> { self.execute(query, params).await } - async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator, { self.execute_raw(statement, params).await @@ -192,15 +248,32 @@ impl GenericClient for Transaction<'_> { self.query_opt(statement, params).await } - async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement + Sync + Send, - I: IntoIterator + Sync + Send, + P: BorrowToSql, + I: IntoIterator + Sync + Send, I::IntoIter: ExactSizeIterator, { self.query_raw(statement, params).await } + async fn query_typed( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.query_typed(statement, params).await + } + + async fn query_typed_raw(&self, statement: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator + Sync + Send, + { + self.query_typed_raw(statement, params).await + } + async fn prepare(&self, query: &str) -> Result { self.prepare(query).await } @@ -217,4 +290,16 @@ impl GenericClient for Transaction<'_> { async fn transaction<'a>(&'a mut self) -> Result, Error> { self.transaction().await } + + async fn batch_execute(&self, query: &str) -> Result<(), Error> { + self.batch_execute(query).await + } + + async fn simple_query(&self, query: &str) -> Result, Error> { + self.simple_query(query).await + } + + fn client(&self) -> &Client { + self.client() + } } diff --git a/tokio-postgres/src/keepalive.rs b/tokio-postgres/src/keepalive.rs new file mode 100644 index 000000000..7bdd76341 --- /dev/null +++ 
b/tokio-postgres/src/keepalive.rs @@ -0,0 +1,38 @@ +use socket2::TcpKeepalive; +use std::time::Duration; + +#[derive(Clone, PartialEq, Eq)] +pub(crate) struct KeepaliveConfig { + pub idle: Duration, + pub interval: Option, + pub retries: Option, +} + +impl From<&KeepaliveConfig> for TcpKeepalive { + fn from(keepalive_config: &KeepaliveConfig) -> Self { + let mut tcp_keepalive = Self::new().with_time(keepalive_config.idle); + + #[cfg(not(any( + target_os = "aix", + target_os = "redox", + target_os = "solaris", + target_os = "openbsd" + )))] + if let Some(interval) = keepalive_config.interval { + tcp_keepalive = tcp_keepalive.with_interval(interval); + } + + #[cfg(not(any( + target_os = "aix", + target_os = "redox", + target_os = "solaris", + target_os = "windows", + target_os = "openbsd" + )))] + if let Some(retries) = keepalive_config.retries { + tcp_keepalive = tcp_keepalive.with_retries(retries); + } + + tcp_keepalive + } +} diff --git a/tokio-postgres/src/lib.rs b/tokio-postgres/src/lib.rs index 2845fcf61..ec843d511 100644 --- a/tokio-postgres/src/lib.rs +++ b/tokio-postgres/src/lib.rs @@ -69,7 +69,7 @@ //! combinator): //! //! ```rust -//! use futures::future; +//! use futures_util::future; //! use std::future::Future; //! use tokio_postgres::{Client, Error, Statement}; //! @@ -103,15 +103,20 @@ //! //! | Feature | Description | Extra dependencies | Default | //! | ------- | ----------- | ------------------ | ------- | -//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 0.2 with the features `dns`, `net` and `time` | yes | +//! | `runtime` | Enable convenience API for the connection process based on the `tokio` crate. | [tokio](https://crates.io/crates/tokio) 1.0 with the features `net` and `time` | yes | +//! | `array-impls` | Enables `ToSql` and `FromSql` trait impls for arrays | - | no | //! | `with-bit-vec-0_6` | Enable support for the `bit-vec` crate. 
| [bit-vec](https://crates.io/crates/bit-vec) 0.6 | no | //! | `with-chrono-0_4` | Enable support for the `chrono` crate. | [chrono](https://crates.io/crates/chrono) 0.4 | no | -//! | `with-eui48-0_4` | Enable support for the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 0.4 | no | -//! | `with-geo-types-0_4` | Enable support for the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types) 0.4 | no | -//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [eui48](https://crates.io/crates/serde_json) 1.0 | no | +//! | `with-eui48-0_4` | Enable support for the 0.4 version of the `eui48` crate. This is deprecated and will be removed. | [eui48](https://crates.io/crates/eui48) 0.4 | no | +//! | `with-eui48-1` | Enable support for the 1.0 version of the `eui48` crate. | [eui48](https://crates.io/crates/eui48) 1.0 | no | +//! | `with-geo-types-0_6` | Enable support for the 0.6 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.6.0) 0.6 | no | +//! | `with-geo-types-0_7` | Enable support for the 0.7 version of the `geo-types` crate. | [geo-types](https://crates.io/crates/geo-types/0.7.0) 0.7 | no | +//! | `with-jiff-0_1` | Enable support for the 0.1 version of the `jiff` crate. | [jiff](https://crates.io/crates/jiff/0.1.0) 0.1 | no | +//! | `with-serde_json-1` | Enable support for the `serde_json` crate. | [serde_json](https://crates.io/crates/serde_json) 1.0 | no | //! | `with-uuid-0_8` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 0.8 | no | -//! | `with-time-0_2` | Enable support for the `time` crate. | [time](https://crates.io/crates/time) 0.2 | no | -#![doc(html_root_url = "https://docs.rs/tokio-postgres/0.5")] +//! | `with-uuid-1` | Enable support for the `uuid` crate. | [uuid](https://crates.io/crates/uuid) 1.0 | no | +//! | `with-time-0_2` | Enable support for the 0.2 version of the `time` crate. | [time](https://crates.io/crates/time/0.2.0) 0.2 | no | +//! 
| `with-time-0_3` | Enable support for the 0.3 version of the `time` crate. | [time](https://crates.io/crates/time/0.3.0) 0.3 | no | #![warn(rust_2018_idioms, clippy::all, missing_docs)] pub use crate::cancel_token::CancelToken; @@ -126,7 +131,7 @@ pub use crate::generic_client::GenericClient; pub use crate::portal::Portal; pub use crate::query::RowStream; pub use crate::row::{Row, SimpleQueryRow}; -pub use crate::simple_query::SimpleQueryStream; +pub use crate::simple_query::{SimpleColumn, SimpleQueryStream}; #[cfg(feature = "runtime")] pub use crate::socket::Socket; pub use crate::statement::{Column, Statement}; @@ -137,6 +142,7 @@ pub use crate::to_statement::ToStatement; pub use crate::transaction::Transaction; pub use crate::transaction_builder::{IsolationLevel, TransactionBuilder}; use crate::types::ToSql; +use std::sync::Arc; pub mod binary_copy; mod bind; @@ -158,6 +164,8 @@ mod copy_in; mod copy_out; pub mod error; mod generic_client; +#[cfg(not(target_arch = "wasm32"))] +mod keepalive; mod maybe_tls_stream; mod portal; mod prepare; @@ -219,6 +227,7 @@ impl Notification { /// An asynchronous message from the server. #[allow(clippy::large_enum_variant)] +#[derive(Debug, Clone)] #[non_exhaustive] pub enum AsyncMessage { /// A notice. @@ -232,6 +241,7 @@ pub enum AsyncMessage { } /// Message returned by the `SimpleQuery` stream. +#[derive(Debug)] #[non_exhaustive] pub enum SimpleQueryMessage { /// A row of data. @@ -240,6 +250,8 @@ pub enum SimpleQueryMessage { /// /// The number of rows modified or selected is returned. 
CommandComplete(u64), + /// Column values of the proceeding row values + RowDescription(Arc<[SimpleColumn]>), } fn slice_iter<'a>( diff --git a/tokio-postgres/src/maybe_tls_stream.rs b/tokio-postgres/src/maybe_tls_stream.rs index 652236ee8..73b0c4721 100644 --- a/tokio-postgres/src/maybe_tls_stream.rs +++ b/tokio-postgres/src/maybe_tls_stream.rs @@ -1,10 +1,8 @@ use crate::tls::{ChannelBinding, TlsStream}; -use bytes::{Buf, BufMut}; use std::io; -use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; pub enum MaybeTlsStream { Raw(S), @@ -16,38 +14,16 @@ where S: AsyncRead + Unpin, T: AsyncRead + Unpin, { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - match self { - MaybeTlsStream::Raw(s) => s.prepare_uninitialized_buffer(buf), - MaybeTlsStream::Tls(s) => s.prepare_uninitialized_buffer(buf), - } - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { match &mut *self { MaybeTlsStream::Raw(s) => Pin::new(s).poll_read(cx, buf), MaybeTlsStream::Tls(s) => Pin::new(s).poll_read(cx, buf), } } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - B: BufMut, - { - match &mut *self { - MaybeTlsStream::Raw(s) => Pin::new(s).poll_read_buf(cx, buf), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_read_buf(cx, buf), - } - } } impl AsyncWrite for MaybeTlsStream @@ -79,21 +55,6 @@ where MaybeTlsStream::Tls(s) => Pin::new(s).poll_shutdown(cx), } } - - fn poll_write_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - B: Buf, - { - match &mut *self { - MaybeTlsStream::Raw(s) => Pin::new(s).poll_write_buf(cx, buf), - MaybeTlsStream::Tls(s) => Pin::new(s).poll_write_buf(cx, buf), - } - } } impl TlsStream for MaybeTlsStream diff 
--git a/tokio-postgres/src/prepare.rs b/tokio-postgres/src/prepare.rs index 49397debf..1d9bacb16 100644 --- a/tokio-postgres/src/prepare.rs +++ b/tokio-postgres/src/prepare.rs @@ -7,7 +7,7 @@ use crate::{query, slice_iter}; use crate::{Column, Error, Statement}; use bytes::Bytes; use fallible_iterator::FallibleIterator; -use futures::{pin_mut, TryStreamExt}; +use futures_util::{pin_mut, TryStreamExt}; use log::debug; use postgres_protocol::message::backend::Message; use postgres_protocol::message::frontend; @@ -86,7 +86,7 @@ pub async fn prepare( let mut parameters = vec![]; let mut it = parameter_description.parameters(); while let Some(oid) = it.next().map_err(Error::parse)? { - let type_ = get_type(&client, oid).await?; + let type_ = get_type(client, oid).await?; parameters.push(type_); } @@ -94,13 +94,18 @@ pub async fn prepare( if let Some(row_description) = row_description { let mut it = row_description.fields(); while let Some(field) = it.next().map_err(Error::parse)? { - let type_ = get_type(&client, field.type_oid()).await?; - let column = Column::new(field.name().to_string(), type_); + let type_ = get_type(client, field.type_oid()).await?; + let column = Column { + name: field.name().to_string(), + table_oid: Some(field.table_oid()).filter(|n| *n != 0), + column_id: Some(field.column_id()).filter(|n| *n != 0), + r#type: type_, + }; columns.push(column); } } - Ok(Statement::new(&client, name, parameters, columns)) + Ok(Statement::new(client, name, parameters, columns)) } fn prepare_rec<'a>( @@ -120,13 +125,13 @@ fn encode(client: &InnerClient, name: &str, query: &str, types: &[Type]) -> Resu client.with_buf(|buf| { frontend::parse(name, query, types.iter().map(Type::oid), buf).map_err(Error::encode)?; - frontend::describe(b'S', &name, buf).map_err(Error::encode)?; + frontend::describe(b'S', name, buf).map_err(Error::encode)?; frontend::sync(buf); Ok(buf.split().freeze()) }) } -async fn get_type(client: &Arc, oid: Oid) -> Result { +pub(crate) async fn 
get_type(client: &Arc, oid: Oid) -> Result { if let Some(type_) = Type::from_oid(oid) { return Ok(type_); } diff --git a/tokio-postgres/src/query.rs b/tokio-postgres/src/query.rs index 7792f0a8a..2fcb22d57 100644 --- a/tokio-postgres/src/query.rs +++ b/tokio-postgres/src/query.rs @@ -1,25 +1,44 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; -use crate::types::{IsNull, ToSql}; -use crate::{Error, Portal, Row, Statement}; +use crate::prepare::get_type; +use crate::types::{BorrowToSql, IsNull}; +use crate::{Column, Error, Portal, Row, Statement}; use bytes::{Bytes, BytesMut}; -use futures::{ready, Stream}; +use fallible_iterator::FallibleIterator; +use futures_util::{ready, Stream}; use log::{debug, log_enabled, Level}; use pin_project_lite::pin_project; -use postgres_protocol::message::backend::Message; +use postgres_protocol::message::backend::{CommandCompleteBody, Message}; use postgres_protocol::message::frontend; +use postgres_types::Type; +use std::fmt; use std::marker::PhantomPinned; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; -pub async fn query<'a, I>( +struct BorrowToSqlParamsDebug<'a, T>(&'a [T]); + +impl fmt::Debug for BorrowToSqlParamsDebug<'_, T> +where + T: BorrowToSql, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.0.iter().map(|x| x.borrow_to_sql())) + .finish() + } +} + +pub async fn query( client: &InnerClient, statement: Statement, params: I, ) -> Result where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let buf = if log_enabled!(Level::Debug) { @@ -27,7 +46,7 @@ where debug!( "executing statement {} with parameters: {:?}", statement.name(), - params, + BorrowToSqlParamsDebug(params.as_slice()), ); encode(client, &statement, params)? 
} else { @@ -37,10 +56,73 @@ where Ok(RowStream { statement, responses, + rows_affected: None, _p: PhantomPinned, }) } +pub async fn query_typed( + client: &Arc, + query: &str, + params: I, +) -> Result +where + P: BorrowToSql, + I: IntoIterator, +{ + let buf = { + let params = params.into_iter().collect::>(); + let param_oids = params.iter().map(|(_, t)| t.oid()).collect::>(); + + client.with_buf(|buf| { + frontend::parse("", query, param_oids.into_iter(), buf).map_err(Error::parse)?; + encode_bind_raw("", params, "", buf)?; + frontend::describe(b'S', "", buf).map_err(Error::encode)?; + frontend::execute("", 0, buf).map_err(Error::encode)?; + frontend::sync(buf); + + Ok(buf.split().freeze()) + })? + }; + + let mut responses = client.send(RequestMessages::Single(FrontendMessage::Raw(buf)))?; + + loop { + match responses.next().await? { + Message::ParseComplete | Message::BindComplete | Message::ParameterDescription(_) => {} + Message::NoData => { + return Ok(RowStream { + statement: Statement::unnamed(vec![], vec![]), + responses, + rows_affected: None, + _p: PhantomPinned, + }); + } + Message::RowDescription(row_description) => { + let mut columns: Vec = vec![]; + let mut it = row_description.fields(); + while let Some(field) = it.next().map_err(Error::parse)? 
{ + let type_ = get_type(client, field.type_oid()).await?; + let column = Column { + name: field.name().to_string(), + table_oid: Some(field.table_oid()).filter(|n| *n != 0), + column_id: Some(field.column_id()).filter(|n| *n != 0), + r#type: type_, + }; + columns.push(column); + } + return Ok(RowStream { + statement: Statement::unnamed(vec![], columns), + responses, + rows_affected: None, + _p: PhantomPinned, + }); + } + _ => return Err(Error::unexpected_message()), + } + } +} + pub async fn query_portal( client: &InnerClient, portal: &Portal, @@ -57,17 +139,32 @@ pub async fn query_portal( Ok(RowStream { statement: portal.statement().clone(), responses, + rows_affected: None, _p: PhantomPinned, }) } -pub async fn execute<'a, I>( +/// Extract the number of rows affected from [`CommandCompleteBody`]. +pub fn extract_row_affected(body: &CommandCompleteBody) -> Result { + let rows = body + .tag() + .map_err(Error::parse)? + .rsplit(' ') + .next() + .unwrap() + .parse() + .unwrap_or(0); + Ok(rows) +} + +pub async fn execute( client: &InnerClient, statement: Statement, params: I, ) -> Result where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let buf = if log_enabled!(Level::Debug) { @@ -75,7 +172,7 @@ where debug!( "executing statement {} with parameters: {:?}", statement.name(), - params, + BorrowToSqlParamsDebug(params.as_slice()), ); encode(client, &statement, params)? } else { @@ -83,21 +180,15 @@ where }; let mut responses = start(client, buf).await?; + let mut rows = 0; loop { match responses.next().await? { Message::DataRow(_) => {} Message::CommandComplete(body) => { - let rows = body - .tag() - .map_err(Error::parse)? 
- .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); - return Ok(rows); + rows = extract_row_affected(&body)?; } - Message::EmptyQueryResponse => return Ok(0), + Message::EmptyQueryResponse => rows = 0, + Message::ReadyForQuery(_) => return Ok(rows), _ => return Err(Error::unexpected_message()), } } @@ -114,9 +205,10 @@ async fn start(client: &InnerClient, buf: Bytes) -> Result { Ok(responses) } -pub fn encode<'a, I>(client: &InnerClient, statement: &Statement, params: I) -> Result +pub fn encode(client: &InnerClient, statement: &Statement, params: I) -> Result where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { client.with_buf(|buf| { @@ -127,32 +219,53 @@ where }) } -pub fn encode_bind<'a, I>( +pub fn encode_bind( statement: &Statement, params: I, portal: &str, buf: &mut BytesMut, ) -> Result<(), Error> where - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { let params = params.into_iter(); + if params.len() != statement.params().len() { + return Err(Error::parameters(params.len(), statement.params().len())); + } - assert!( - statement.params().len() == params.len(), - "expected {} parameters but got {}", - statement.params().len(), - params.len() - ); + encode_bind_raw( + statement.name(), + params.zip(statement.params().iter().cloned()), + portal, + buf, + ) +} + +fn encode_bind_raw( + statement_name: &str, + params: I, + portal: &str, + buf: &mut BytesMut, +) -> Result<(), Error> +where + P: BorrowToSql, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, +{ + let (param_formats, params): (Vec<_>, Vec<_>) = params + .into_iter() + .map(|(p, ty)| (p.borrow_to_sql().encode_format(&ty) as i16, (p, ty))) + .unzip(); let mut error_idx = 0; let r = frontend::bind( portal, - statement.name(), - Some(1), - params.zip(statement.params()).enumerate(), - |(idx, (param, ty)), buf| match param.to_sql_checked(ty, buf) { + statement_name, + param_formats, + 
params.into_iter().enumerate(), + |(idx, (param, ty)), buf| match param.borrow_to_sql().to_sql_checked(&ty, buf) { Ok(IsNull::No) => Ok(postgres_protocol::IsNull::No), Ok(IsNull::Yes) => Ok(postgres_protocol::IsNull::Yes), Err(e) => { @@ -175,6 +288,7 @@ pin_project! { pub struct RowStream { statement: Statement, responses: Responses, + rows_affected: Option, #[pin] _p: PhantomPinned, } @@ -185,15 +299,27 @@ impl Stream for RowStream { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); - match ready!(this.responses.poll_next(cx)?) { - Message::DataRow(body) => { - Poll::Ready(Some(Ok(Row::new(this.statement.clone(), body)?))) + loop { + match ready!(this.responses.poll_next(cx)?) { + Message::DataRow(body) => { + return Poll::Ready(Some(Ok(Row::new(this.statement.clone(), body)?))) + } + Message::CommandComplete(body) => { + *this.rows_affected = Some(extract_row_affected(&body)?); + } + Message::EmptyQueryResponse | Message::PortalSuspended => {} + Message::ReadyForQuery(_) => return Poll::Ready(None), + _ => return Poll::Ready(Some(Err(Error::unexpected_message()))), } - Message::EmptyQueryResponse - | Message::CommandComplete(_) - | Message::PortalSuspended => Poll::Ready(None), - Message::ErrorResponse(body) => Poll::Ready(Some(Err(Error::db(body)))), - _ => Poll::Ready(Some(Err(Error::unexpected_message()))), } } } + +impl RowStream { + /// Returns the number of rows affected by the query. + /// + /// This function will return `None` until the stream has been exhausted. + pub fn rows_affected(&self) -> Option { + self.rows_affected + } +} diff --git a/tokio-postgres/src/row.rs b/tokio-postgres/src/row.rs index 842216ad2..ccb8817d0 100644 --- a/tokio-postgres/src/row.rs +++ b/tokio-postgres/src/row.rs @@ -1,6 +1,7 @@ //! Rows. 
use crate::row::sealed::{AsName, Sealed}; +use crate::simple_query::SimpleColumn; use crate::statement::Column; use crate::types::{FromSql, Type, WrongType}; use crate::{Error, Statement}; @@ -78,9 +79,9 @@ impl RowIndex for str { } } -impl<'a, T> Sealed for &'a T where T: ?Sized + Sealed {} +impl Sealed for &T where T: ?Sized + Sealed {} -impl<'a, T> RowIndex for &'a T +impl RowIndex for &T where T: ?Sized + RowIndex, { @@ -94,6 +95,7 @@ where } /// A row of data returned from the database by a query. +#[derive(Clone)] pub struct Row { statement: Statement, body: DataRowBody, @@ -140,6 +142,7 @@ impl Row { /// # Panics /// /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. + #[track_caller] pub fn get<'a, I, T>(&'a self, idx: I) -> T where I: RowIndex + fmt::Display, @@ -188,16 +191,26 @@ impl Row { } } +impl AsName for SimpleColumn { + fn as_name(&self) -> &str { + self.name() + } +} + /// A row of data returned from the database by a simple query. +#[derive(Debug)] pub struct SimpleQueryRow { - columns: Arc<[String]>, + columns: Arc<[SimpleColumn]>, body: DataRowBody, ranges: Vec>>, } impl SimpleQueryRow { #[allow(clippy::new_ret_no_self)] - pub(crate) fn new(columns: Arc<[String]>, body: DataRowBody) -> Result { + pub(crate) fn new( + columns: Arc<[SimpleColumn]>, + body: DataRowBody, + ) -> Result { let ranges = body.ranges().collect().map_err(Error::parse)?; Ok(SimpleQueryRow { columns, @@ -206,6 +219,11 @@ impl SimpleQueryRow { }) } + /// Returns information about the columns of data in the row. + pub fn columns(&self) -> &[SimpleColumn] { + &self.columns + } + /// Determines if the row contains no values. pub fn is_empty(&self) -> bool { self.len() == 0 @@ -223,6 +241,7 @@ impl SimpleQueryRow { /// # Panics /// /// Panics if the index is out of bounds or if the value cannot be converted to the specified type. 
+ #[track_caller] pub fn get(&self, idx: I) -> Option<&str> where I: RowIndex + fmt::Display, diff --git a/tokio-postgres/src/simple_query.rs b/tokio-postgres/src/simple_query.rs index 82ac35664..24473b896 100644 --- a/tokio-postgres/src/simple_query.rs +++ b/tokio-postgres/src/simple_query.rs @@ -1,10 +1,11 @@ use crate::client::{InnerClient, Responses}; use crate::codec::FrontendMessage; use crate::connection::RequestMessages; +use crate::query::extract_row_affected; use crate::{Error, SimpleQueryMessage, SimpleQueryRow}; use bytes::Bytes; use fallible_iterator::FallibleIterator; -use futures::{ready, Stream}; +use futures_util::{ready, Stream}; use log::debug; use pin_project_lite::pin_project; use postgres_protocol::message::backend::Message; @@ -14,6 +15,23 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; +/// Information about a column of a single query row. +#[derive(Debug)] +pub struct SimpleColumn { + name: String, +} + +impl SimpleColumn { + pub(crate) fn new(name: String) -> SimpleColumn { + SimpleColumn { name } + } + + /// Returns the name of the column. + pub fn name(&self) -> &str { + &self.name + } +} + pub async fn simple_query(client: &InnerClient, query: &str) -> Result { debug!("executing simple query: {}", query); @@ -56,7 +74,7 @@ pin_project! { /// A stream of simple query results. pub struct SimpleQueryStream { responses: Responses, - columns: Option>, + columns: Option>, #[pin] _p: PhantomPinned, } @@ -67,41 +85,34 @@ impl Stream for SimpleQueryStream { fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); - loop { - match ready!(this.responses.poll_next(cx)?) { - Message::CommandComplete(body) => { - let rows = body - .tag() - .map_err(Error::parse)? 
- .rsplit(' ') - .next() - .unwrap() - .parse() - .unwrap_or(0); - return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows)))); - } - Message::EmptyQueryResponse => { - return Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0)))); - } - Message::RowDescription(body) => { - let columns = body - .fields() - .map(|f| Ok(f.name().to_string())) - .collect::>() - .map_err(Error::parse)? - .into(); - *this.columns = Some(columns); - } - Message::DataRow(body) => { - let row = match &this.columns { - Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, - None => return Poll::Ready(Some(Err(Error::unexpected_message()))), - }; - return Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row)))); - } - Message::ReadyForQuery(_) => return Poll::Ready(None), - _ => return Poll::Ready(Some(Err(Error::unexpected_message()))), + match ready!(this.responses.poll_next(cx)?) { + Message::CommandComplete(body) => { + let rows = extract_row_affected(&body)?; + Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(rows)))) + } + Message::EmptyQueryResponse => { + Poll::Ready(Some(Ok(SimpleQueryMessage::CommandComplete(0)))) + } + Message::RowDescription(body) => { + let columns: Arc<[SimpleColumn]> = body + .fields() + .map(|f| Ok(SimpleColumn::new(f.name().to_string()))) + .collect::>() + .map_err(Error::parse)? 
+ .into(); + + *this.columns = Some(columns.clone()); + Poll::Ready(Some(Ok(SimpleQueryMessage::RowDescription(columns)))) + } + Message::DataRow(body) => { + let row = match &this.columns { + Some(columns) => SimpleQueryRow::new(columns.clone(), body)?, + None => return Poll::Ready(Some(Err(Error::unexpected_message()))), + }; + Poll::Ready(Some(Ok(SimpleQueryMessage::Row(row)))) } + Message::ReadyForQuery(_) => Poll::Ready(None), + _ => Poll::Ready(Some(Err(Error::unexpected_message()))), } } } diff --git a/tokio-postgres/src/socket.rs b/tokio-postgres/src/socket.rs index cc7149674..966510d56 100644 --- a/tokio-postgres/src/socket.rs +++ b/tokio-postgres/src/socket.rs @@ -1,9 +1,7 @@ -use bytes::{Buf, BufMut}; use std::io; -use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::net::TcpStream; #[cfg(unix)] use tokio::net::UnixStream; @@ -33,41 +31,17 @@ impl Socket { } impl AsyncRead for Socket { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit]) -> bool { - match &self.0 { - Inner::Tcp(s) => s.prepare_uninitialized_buffer(buf), - #[cfg(unix)] - Inner::Unix(s) => s.prepare_uninitialized_buffer(buf), - } - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf<'_>, + ) -> Poll> { match &mut self.0 { Inner::Tcp(s) => Pin::new(s).poll_read(cx, buf), #[cfg(unix)] Inner::Unix(s) => Pin::new(s).poll_read(cx, buf), } } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - B: BufMut, - { - match &mut self.0 { - Inner::Tcp(s) => Pin::new(s).poll_read_buf(cx, buf), - #[cfg(unix)] - Inner::Unix(s) => Pin::new(s).poll_read_buf(cx, buf), - } - } } impl AsyncWrite for Socket { @@ -98,20 +72,4 @@ impl AsyncWrite for Socket { Inner::Unix(s) => Pin::new(s).poll_shutdown(cx), } } - - fn poll_write_buf( 
- mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut B, - ) -> Poll> - where - Self: Sized, - B: Buf, - { - match &mut self.0 { - Inner::Tcp(s) => Pin::new(s).poll_write_buf(cx, buf), - #[cfg(unix)] - Inner::Unix(s) => Pin::new(s).poll_write_buf(cx, buf), - } - } } diff --git a/tokio-postgres/src/statement.rs b/tokio-postgres/src/statement.rs index 97561a8e4..4f7ddaec6 100644 --- a/tokio-postgres/src/statement.rs +++ b/tokio-postgres/src/statement.rs @@ -3,10 +3,7 @@ use crate::codec::FrontendMessage; use crate::connection::RequestMessages; use crate::types::Type; use postgres_protocol::message::frontend; -use std::{ - fmt, - sync::{Arc, Weak}, -}; +use std::sync::{Arc, Weak}; struct StatementInner { client: Weak, @@ -17,6 +14,10 @@ struct StatementInner { impl Drop for StatementInner { fn drop(&mut self) { + if self.name.is_empty() { + // Unnamed statements don't need to be closed + return; + } if let Some(client) = self.client.upgrade() { let buf = client.with_buf(|buf| { frontend::close(b'S', &self.name, buf).unwrap(); @@ -49,6 +50,15 @@ impl Statement { })) } + pub(crate) fn unnamed(params: Vec, columns: Vec) -> Statement { + Statement(Arc::new(StatementInner { + client: Weak::new(), + name: String::new(), + params, + columns, + })) + } + pub(crate) fn name(&self) -> &str { &self.0.name } @@ -64,33 +74,43 @@ impl Statement { } } +impl std::fmt::Debug for Statement { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + f.debug_struct("Statement") + .field("name", &self.0.name) + .field("params", &self.0.params) + .field("columns", &self.0.columns) + .finish_non_exhaustive() + } +} + /// Information about a column of a query. 
+#[derive(Debug)] pub struct Column { - name: String, - type_: Type, + pub(crate) name: String, + pub(crate) table_oid: Option, + pub(crate) column_id: Option, + pub(crate) r#type: Type, } impl Column { - pub(crate) fn new(name: String, type_: Type) -> Column { - Column { name, type_ } - } - /// Returns the name of the column. pub fn name(&self) -> &str { &self.name } - /// Returns the type of the column. - pub fn type_(&self) -> &Type { - &self.type_ + /// Returns the OID of the underlying database table. + pub fn table_oid(&self) -> Option { + self.table_oid + } + + /// Return the column ID within the underlying database table. + pub fn column_id(&self) -> Option { + self.column_id } -} -impl fmt::Debug for Column { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Column") - .field("name", &self.name) - .field("type", &self.type_) - .finish() + /// Returns the type of the column. + pub fn type_(&self) -> &Type { + &self.r#type } } diff --git a/tokio-postgres/src/tls.rs b/tokio-postgres/src/tls.rs index 4e852d3f9..963daed18 100644 --- a/tokio-postgres/src/tls.rs +++ b/tokio-postgres/src/tls.rs @@ -5,7 +5,7 @@ use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use std::{fmt, io}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; pub(crate) mod private { pub struct ForcePrivateApi; @@ -125,8 +125,8 @@ impl AsyncRead for NoTlsStream { fn poll_read( self: Pin<&mut Self>, _: &mut Context<'_>, - _: &mut [u8], - ) -> Poll> { + _: &mut ReadBuf<'_>, + ) -> Poll> { match *self {} } } diff --git a/tokio-postgres/src/to_statement.rs b/tokio-postgres/src/to_statement.rs index 3ff82493c..7e1299272 100644 --- a/tokio-postgres/src/to_statement.rs +++ b/tokio-postgres/src/to_statement.rs @@ -11,7 +11,7 @@ mod private { Query(&'a str), } - impl<'a> ToStatementType<'a> { + impl ToStatementType<'_> { pub async fn into_statement(self, client: &Client) -> Result { match self { 
ToStatementType::Statement(s) => Ok(s.clone()), @@ -47,3 +47,11 @@ impl ToStatement for str { } impl Sealed for str {} + +impl ToStatement for String { + fn __convert(&self) -> ToStatementType<'_> { + ToStatementType::Query(self) + } +} + +impl Sealed for String {} diff --git a/tokio-postgres/src/transaction.rs b/tokio-postgres/src/transaction.rs index a1ee4f6cb..782c476c4 100644 --- a/tokio-postgres/src/transaction.rs +++ b/tokio-postgres/src/transaction.rs @@ -5,7 +5,7 @@ use crate::query::RowStream; #[cfg(feature = "runtime")] use crate::tls::MakeTlsConnect; use crate::tls::TlsConnect; -use crate::types::{ToSql, Type}; +use crate::types::{BorrowToSql, ToSql, Type}; #[cfg(feature = "runtime")] use crate::Socket; use crate::{ @@ -13,7 +13,7 @@ use crate::{ SimpleQueryMessage, Statement, ToStatement, }; use bytes::Buf; -use futures::TryStreamExt; +use futures_util::TryStreamExt; use postgres_protocol::message::frontend; use tokio::io::{AsyncRead, AsyncWrite}; @@ -23,20 +23,26 @@ use tokio::io::{AsyncRead, AsyncWrite}; /// transaction. Transactions can be nested, with inner transactions implemented via safepoints. pub struct Transaction<'a> { client: &'a mut Client, - depth: u32, + savepoint: Option, done: bool, } -impl<'a> Drop for Transaction<'a> { +/// A representation of a PostgreSQL database savepoint. 
+struct Savepoint { + name: String, + depth: u32, +} + +impl Drop for Transaction<'_> { fn drop(&mut self) { if self.done { return; } - let query = if self.depth == 0 { - "ROLLBACK".to_string() + let query = if let Some(sp) = self.savepoint.as_ref() { + format!("ROLLBACK TO {}", sp.name) } else { - format!("ROLLBACK TO sp{}", self.depth) + "ROLLBACK".to_string() }; let buf = self.client.inner().with_buf(|buf| { frontend::query(&query, buf).unwrap(); @@ -53,7 +59,7 @@ impl<'a> Transaction<'a> { pub(crate) fn new(client: &'a mut Client) -> Transaction<'a> { Transaction { client, - depth: 0, + savepoint: None, done: false, } } @@ -61,10 +67,10 @@ impl<'a> Transaction<'a> { /// Consumes the transaction, committing all changes made within it. pub async fn commit(mut self) -> Result<(), Error> { self.done = true; - let query = if self.depth == 0 { - "COMMIT".to_string() + let query = if let Some(sp) = self.savepoint.as_ref() { + format!("RELEASE {}", sp.name) } else { - format!("RELEASE sp{}", self.depth) + "COMMIT".to_string() }; self.client.batch_execute(&query).await } @@ -74,10 +80,10 @@ impl<'a> Transaction<'a> { /// This is equivalent to `Transaction`'s `Drop` implementation, but provides any error encountered to the caller. pub async fn rollback(mut self) -> Result<(), Error> { self.done = true; - let query = if self.depth == 0 { - "ROLLBACK".to_string() + let query = if let Some(sp) = self.savepoint.as_ref() { + format!("ROLLBACK TO {}", sp.name) } else { - format!("ROLLBACK TO sp{}", self.depth) + "ROLLBACK".to_string() }; self.client.batch_execute(&query).await } @@ -133,15 +139,34 @@ impl<'a> Transaction<'a> { } /// Like `Client::query_raw`. 
- pub async fn query_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + pub async fn query_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.client.query_raw(statement, params).await } + /// Like `Client::query_typed`. + pub async fn query_typed( + &self, + statement: &str, + params: &[(&(dyn ToSql + Sync), Type)], + ) -> Result, Error> { + self.client.query_typed(statement, params).await + } + + /// Like `Client::query_typed_raw`. + pub async fn query_typed_raw(&self, query: &str, params: I) -> Result + where + P: BorrowToSql, + I: IntoIterator, + { + self.client.query_typed_raw(query, params).await + } + /// Like `Client::execute`. pub async fn execute( &self, @@ -155,10 +180,11 @@ impl<'a> Transaction<'a> { } /// Like `Client::execute_iter`. - pub async fn execute_raw<'b, I, T>(&self, statement: &T, params: I) -> Result + pub async fn execute_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { self.client.execute_raw(statement, params).await @@ -186,13 +212,14 @@ impl<'a> Transaction<'a> { /// A maximally flexible version of [`bind`]. /// /// [`bind`]: #method.bind - pub async fn bind_raw<'b, T, I>(&self, statement: &T, params: I) -> Result + pub async fn bind_raw(&self, statement: &T, params: I) -> Result where T: ?Sized + ToStatement, - I: IntoIterator, + P: BorrowToSql, + I: IntoIterator, I::IntoIter: ExactSizeIterator, { - let statement = statement.__convert().into_statement(&self.client).await?; + let statement = statement.__convert().into_statement(self.client).await?; bind::bind(self.client.inner(), statement, params).await } @@ -272,16 +299,34 @@ impl<'a> Transaction<'a> { self.client.cancel_query_raw(stream, tls).await } - /// Like `Client::transaction`. 
+ /// Like `Client::transaction`, but creates a nested transaction via a savepoint. pub async fn transaction(&mut self) -> Result, Error> { - let depth = self.depth + 1; - let query = format!("SAVEPOINT sp{}", depth); + self._savepoint(None).await + } + + /// Like `Client::transaction`, but creates a nested transaction via a savepoint with the specified name. + pub async fn savepoint(&mut self, name: I) -> Result, Error> + where + I: Into, + { + self._savepoint(Some(name.into())).await + } + + async fn _savepoint(&mut self, name: Option) -> Result, Error> { + let depth = self.savepoint.as_ref().map_or(0, |sp| sp.depth) + 1; + let name = name.unwrap_or_else(|| format!("sp_{}", depth)); + let query = format!("SAVEPOINT {}", name); self.batch_execute(&query).await?; Ok(Transaction { client: self.client, - depth, + savepoint: Some(Savepoint { name, depth }), done: false, }) } + + /// Returns a reference to the underlying `Client`. + pub fn client(&self) -> &Client { + self.client + } } diff --git a/tokio-postgres/src/transaction_builder.rs b/tokio-postgres/src/transaction_builder.rs index 9718ac588..88c883176 100644 --- a/tokio-postgres/src/transaction_builder.rs +++ b/tokio-postgres/src/transaction_builder.rs @@ -1,4 +1,6 @@ -use crate::{Client, Error, Transaction}; +use postgres_protocol::message::frontend; + +use crate::{codec::FrontendMessage, connection::RequestMessages, Client, Error, Transaction}; /// The isolation level of a database transaction. 
#[derive(Debug, Copy, Clone)] @@ -106,7 +108,41 @@ impl<'a> TransactionBuilder<'a> { query.push_str(s); } - self.client.batch_execute(&query).await?; + struct RollbackIfNotDone<'me> { + client: &'me Client, + done: bool, + } + + impl Drop for RollbackIfNotDone<'_> { + fn drop(&mut self) { + if self.done { + return; + } + + let buf = self.client.inner().with_buf(|buf| { + frontend::query("ROLLBACK", buf).unwrap(); + buf.split().freeze() + }); + let _ = self + .client + .inner() + .send(RequestMessages::Single(FrontendMessage::Raw(buf))); + } + } + + // This is done as `Future` created by this method can be dropped after + // `RequestMessages` is synchronously send to the `Connection` by + // `batch_execute()`, but before `Responses` is asynchronously polled to + // completion. In that case `Transaction` won't be created and thus + // won't be rolled back. + { + let mut cleaner = RollbackIfNotDone { + client: self.client, + done: false, + }; + self.client.batch_execute(&query).await?; + cleaner.done = true; + } Ok(Transaction::new(self.client)) } diff --git a/tokio-postgres/tests/test/binary_copy.rs b/tokio-postgres/tests/test/binary_copy.rs index ab69742dc..94b96ab85 100644 --- a/tokio-postgres/tests/test/binary_copy.rs +++ b/tokio-postgres/tests/test/binary_copy.rs @@ -1,5 +1,5 @@ use crate::connect; -use futures::{pin_mut, TryStreamExt}; +use futures_util::{pin_mut, TryStreamExt}; use tokio_postgres::binary_copy::{BinaryCopyInWriter, BinaryCopyOutStream}; use tokio_postgres::types::Type; diff --git a/tokio-postgres/tests/test/main.rs b/tokio-postgres/tests/test/main.rs index b01037edc..9a6aa26fe 100644 --- a/tokio-postgres/tests/test/main.rs +++ b/tokio-postgres/tests/test/main.rs @@ -1,11 +1,14 @@ #![warn(rust_2018_idioms)] use bytes::{Bytes, BytesMut}; -use futures::channel::mpsc; -use futures::{ - future, join, pin_mut, stream, try_join, FutureExt, SinkExt, StreamExt, TryStreamExt, +use futures_channel::mpsc; +use futures_util::{ + future, join, pin_mut, 
stream, try_join, Future, FutureExt, SinkExt, StreamExt, TryStreamExt, }; +use pin_project_lite::pin_project; use std::fmt::Write; +use std::pin::Pin; +use std::task::{Context, Poll}; use std::time::Duration; use tokio::net::TcpStream; use tokio::time; @@ -22,6 +25,35 @@ mod parse; mod runtime; mod types; +pin_project! { + /// Polls `F` at most `polls_left` times returning `Some(F::Output)` if + /// [`Future`] returned [`Poll::Ready`] or [`None`] otherwise. + struct Cancellable { + #[pin] + fut: F, + polls_left: usize, + } +} + +impl Future for Cancellable { + type Output = Option; + + fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { + let this = self.project(); + match this.fut.poll(ctx) { + Poll::Ready(r) => Poll::Ready(Some(r)), + Poll::Pending => { + *this.polls_left = this.polls_left.saturating_sub(1); + if *this.polls_left == 0 { + Poll::Ready(None) + } else { + Poll::Pending + } + } + } + } +} + async fn connect_raw(s: &str) -> Result<(Client, Connection), Error> { let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let config = s.parse::().unwrap(); @@ -35,6 +67,20 @@ async fn connect(s: &str) -> Client { client } +async fn current_transaction_id(client: &Client) -> i64 { + client + .query("SELECT txid_current()", &[]) + .await + .unwrap() + .pop() + .unwrap() + .get::<_, i64>("txid_current") +} + +async fn in_transaction(client: &Client) -> bool { + current_transaction_id(client).await == current_transaction_id(client).await +} + #[tokio::test] async fn plain_password_missing() { connect_raw("user=pass_user dbname=postgres") @@ -257,6 +303,7 @@ async fn custom_range() { } #[tokio::test] +#[allow(clippy::get_first)] async fn simple_query() { let client = connect("user=postgres").await; @@ -281,24 +328,35 @@ async fn simple_query() { _ => panic!("unexpected message"), } match &messages[2] { + SimpleQueryMessage::RowDescription(columns) => { + assert_eq!(columns.get(0).map(|c| c.name()), Some("id")); + 
assert_eq!(columns.get(1).map(|c| c.name()), Some("name")); + } + _ => panic!("unexpected message"), + } + match &messages[3] { SimpleQueryMessage::Row(row) => { + assert_eq!(row.columns().get(0).map(|c| c.name()), Some("id")); + assert_eq!(row.columns().get(1).map(|c| c.name()), Some("name")); assert_eq!(row.get(0), Some("1")); assert_eq!(row.get(1), Some("steven")); } _ => panic!("unexpected message"), } - match &messages[3] { + match &messages[4] { SimpleQueryMessage::Row(row) => { + assert_eq!(row.columns().get(0).map(|c| c.name()), Some("id")); + assert_eq!(row.columns().get(1).map(|c| c.name()), Some("name")); assert_eq!(row.get(0), Some("2")); assert_eq!(row.get(1), Some("joe")); } _ => panic!("unexpected message"), } - match messages[4] { + match messages[5] { SimpleQueryMessage::CommandComplete(2) => {} _ => panic!("unexpected message"), } - assert_eq!(messages.len(), 5); + assert_eq!(messages.len(), 6); } #[tokio::test] @@ -308,7 +366,7 @@ async fn cancel_query_raw() { let socket = TcpStream::connect("127.0.0.1:5433").await.unwrap(); let cancel_token = client.cancel_token(); let cancel = cancel_token.cancel_query_raw(socket, NoTls); - let cancel = time::delay_for(Duration::from_millis(100)).then(|()| cancel); + let cancel = time::sleep(Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); @@ -373,6 +431,80 @@ async fn transaction_rollback() { assert_eq!(rows.len(), 0); } +#[tokio::test] +async fn transaction_future_cancellation() { + let mut client = connect("user=postgres").await; + + for i in 0.. { + let done = { + let txn = client.transaction(); + let fut = Cancellable { + fut: txn, + polls_left: i, + }; + fut.await + .map(|res| res.expect("transaction failed")) + .is_some() + }; + + assert!(!in_transaction(&client).await); + + if done { + break; + } + } +} + +#[tokio::test] +async fn transaction_commit_future_cancellation() { + let mut client = connect("user=postgres").await; + + for i in 0.. 
{ + let done = { + let txn = client.transaction().await.unwrap(); + let commit = txn.commit(); + let fut = Cancellable { + fut: commit, + polls_left: i, + }; + fut.await + .map(|res| res.expect("transaction failed")) + .is_some() + }; + + assert!(!in_transaction(&client).await); + + if done { + break; + } + } +} + +#[tokio::test] +async fn transaction_rollback_future_cancellation() { + let mut client = connect("user=postgres").await; + + for i in 0.. { + let done = { + let txn = client.transaction().await.unwrap(); + let rollback = txn.rollback(); + let fut = Cancellable { + fut: rollback, + polls_left: i, + }; + fut.await + .map(|res| res.expect("transaction failed")) + .is_some() + }; + + assert!(!in_transaction(&client).await); + + if done { + break; + } + } +} + #[tokio::test] async fn transaction_rollback_drop() { let mut client = connect("user=postgres").await; @@ -577,7 +709,8 @@ async fn notices() { .unwrap(); let (tx, rx) = mpsc::unbounded(); - let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!(e)); + let stream = + stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!("{}", e)); let connection = stream.forward(tx).map(|r| r.unwrap()); tokio::spawn(connection); @@ -612,7 +745,8 @@ async fn notifications() { let (client, mut connection) = connect_raw("user=postgres").await.unwrap(); let (tx, rx) = mpsc::unbounded(); - let stream = stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!(e)); + let stream = + stream::poll_fn(move |cx| connection.poll_message(cx)).map_err(|e| panic!("{}", e)); let connection = stream.forward(tx).map(|r| r.unwrap()); tokio::spawn(connection); @@ -799,3 +933,149 @@ async fn query_opt() { .err() .unwrap(); } + +#[tokio::test] +async fn deferred_constraint() { + let client = connect("user=postgres").await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE t ( + i INT, + UNIQUE (i) DEFERRABLE INITIALLY DEFERRED + ); + ", + ) + .await + .unwrap(); + + 
client + .execute("INSERT INTO t (i) VALUES (1)", &[]) + .await + .unwrap(); + client + .execute("INSERT INTO t (i) VALUES (1)", &[]) + .await + .unwrap_err(); +} + +#[tokio::test] +async fn query_typed_no_transaction() { + let client = connect("user=postgres").await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo ( + name TEXT, + age INT + ); + INSERT INTO foo (name, age) VALUES ('alice', 20), ('bob', 30), ('carol', 40); + ", + ) + .await + .unwrap(); + + let rows: Vec = client + .query_typed( + "SELECT name, age, 'literal', 5 FROM foo WHERE name <> $1 AND age < $2 ORDER BY age", + &[(&"alice", Type::TEXT), (&50i32, Type::INT4)], + ) + .await + .unwrap(); + + assert_eq!(rows.len(), 2); + let first_row = &rows[0]; + assert_eq!(first_row.get::<_, &str>(0), "bob"); + assert_eq!(first_row.get::<_, i32>(1), 30); + assert_eq!(first_row.get::<_, &str>(2), "literal"); + assert_eq!(first_row.get::<_, i32>(3), 5); + + let second_row = &rows[1]; + assert_eq!(second_row.get::<_, &str>(0), "carol"); + assert_eq!(second_row.get::<_, i32>(1), 40); + assert_eq!(second_row.get::<_, &str>(2), "literal"); + assert_eq!(second_row.get::<_, i32>(3), 5); + + // Test for UPDATE that returns no data + let updated_rows = client + .query_typed("UPDATE foo set age = 33", &[]) + .await + .unwrap(); + assert_eq!(updated_rows.len(), 0); +} + +#[tokio::test] +async fn query_typed_with_transaction() { + let mut client = connect("user=postgres").await; + + client + .batch_execute( + " + CREATE TEMPORARY TABLE foo ( + name TEXT, + age INT + ); + ", + ) + .await + .unwrap(); + + let transaction = client.transaction().await.unwrap(); + + let rows: Vec = transaction + .query_typed( + "INSERT INTO foo (name, age) VALUES ($1, $2), ($3, $4), ($5, $6) returning name, age", + &[ + (&"alice", Type::TEXT), + (&20i32, Type::INT4), + (&"bob", Type::TEXT), + (&30i32, Type::INT4), + (&"carol", Type::TEXT), + (&40i32, Type::INT4), + ], + ) + .await + .unwrap(); + let inserted_values: Vec<(String, 
i32)> = rows + .iter() + .map(|row| (row.get::<_, String>(0), row.get::<_, i32>(1))) + .collect(); + assert_eq!( + inserted_values, + [ + ("alice".to_string(), 20), + ("bob".to_string(), 30), + ("carol".to_string(), 40) + ] + ); + + let rows: Vec = transaction + .query_typed( + "SELECT name, age, 'literal', 5 FROM foo WHERE name <> $1 AND age < $2 ORDER BY age", + &[(&"alice", Type::TEXT), (&50i32, Type::INT4)], + ) + .await + .unwrap(); + + assert_eq!(rows.len(), 2); + let first_row = &rows[0]; + assert_eq!(first_row.get::<_, &str>(0), "bob"); + assert_eq!(first_row.get::<_, i32>(1), 30); + assert_eq!(first_row.get::<_, &str>(2), "literal"); + assert_eq!(first_row.get::<_, i32>(3), 5); + + let second_row = &rows[1]; + assert_eq!(second_row.get::<_, &str>(0), "carol"); + assert_eq!(second_row.get::<_, i32>(1), 40); + assert_eq!(second_row.get::<_, &str>(2), "literal"); + assert_eq!(second_row.get::<_, i32>(3), 5); + + // Test for UPDATE that returns no data + let updated_rows = transaction + .query_typed("UPDATE foo set age = 33", &[]) + .await + .unwrap(); + assert_eq!(updated_rows.len(), 0); +} diff --git a/tokio-postgres/tests/test/parse.rs b/tokio-postgres/tests/test/parse.rs index a7a9625b2..35eeca72b 100644 --- a/tokio-postgres/tests/test/parse.rs +++ b/tokio-postgres/tests/test/parse.rs @@ -1,5 +1,5 @@ use std::time::Duration; -use tokio_postgres::config::{Config, TargetSessionAttrs}; +use tokio_postgres::config::{Config, SslNegotiation, TargetSessionAttrs}; fn check(s: &str, config: &Config) { assert_eq!(s.parse::().expect(s), *config, "`{}`", s); @@ -34,6 +34,30 @@ fn settings() { .keepalives_idle(Duration::from_secs(30)) .target_session_attrs(TargetSessionAttrs::ReadWrite), ); + check( + "connect_timeout=3 keepalives=0 keepalives_idle=30 target_session_attrs=read-only", + Config::new() + .connect_timeout(Duration::from_secs(3)) + .keepalives(false) + .keepalives_idle(Duration::from_secs(30)) + .target_session_attrs(TargetSessionAttrs::ReadOnly), + ); + 
check( + "sslnegotiation=direct", + Config::new().ssl_negotiation(SslNegotiation::Direct), + ); +} + +#[test] +fn keepalive_settings() { + check( + "keepalives=1 keepalives_idle=15 keepalives_interval=5 keepalives_retries=9", + Config::new() + .keepalives(true) + .keepalives_idle(Duration::from_secs(15)) + .keepalives_interval(Duration::from_secs(5)) + .keepalives_retries(9), + ); } #[test] diff --git a/tokio-postgres/tests/test/runtime.rs b/tokio-postgres/tests/test/runtime.rs index e07aa4a63..86c1f0701 100644 --- a/tokio-postgres/tests/test/runtime.rs +++ b/tokio-postgres/tests/test/runtime.rs @@ -1,4 +1,4 @@ -use futures::{join, FutureExt}; +use futures_util::{join, FutureExt}; use std::time::Duration; use tokio::time; use tokio_postgres::error::SqlState; @@ -66,13 +66,65 @@ async fn target_session_attrs_err() { .unwrap(); } +#[tokio::test] +async fn host_only_ok() { + let _ = tokio_postgres::connect( + "host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_only_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_and_host_ok() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_mismatch() { + let _ = tokio_postgres::connect( + "hostaddr=127.0.0.1,127.0.0.2 host=localhost port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + +#[tokio::test] +async fn hostaddr_host_both_missing() { + let _ = tokio_postgres::connect( + "port=5433 user=pass_user dbname=postgres password=password", + NoTls, + ) + .await + .err() + .unwrap(); +} + #[tokio::test] async fn cancel_query() { let client = connect("host=localhost 
port=5433 user=postgres").await; let cancel_token = client.cancel_token(); let cancel = cancel_token.cancel_query(NoTls); - let cancel = time::delay_for(Duration::from_millis(100)).then(|()| cancel); + let cancel = time::sleep(Duration::from_millis(100)).then(|()| cancel); let sleep = client.batch_execute("SELECT pg_sleep(100)"); diff --git a/tokio-postgres/tests/test/types/chrono_04.rs b/tokio-postgres/tests/test/types/chrono_04.rs index 13c8dc14f..eda8151a6 100644 --- a/tokio-postgres/tests/test/types/chrono_04.rs +++ b/tokio-postgres/tests/test/types/chrono_04.rs @@ -1,6 +1,9 @@ -use chrono_04::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, TimeZone, Utc}; -use tokio_postgres::types::{Date, Timestamp}; +use chrono_04::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc}; +use std::fmt; +use tokio_postgres::types::{Date, FromSqlOwned, Timestamp}; +use tokio_postgres::Client; +use crate::connect; use crate::types::test_type; #[tokio::test] @@ -51,8 +54,9 @@ async fn test_date_time_params() { fn make_check(time: &str) -> (Option>, &str) { ( Some( - Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") - .unwrap(), + DateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f%#z'") + .unwrap() + .to_utc(), ), time, ) @@ -60,9 +64,9 @@ async fn test_date_time_params() { test_type( "TIMESTAMP WITH TIME ZONE", &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), (None, "NULL"), ], ) @@ -74,8 +78,9 @@ async fn test_with_special_date_time_params() { fn make_check(time: &str) -> (Timestamp>, &str) { ( Timestamp::Value( - Utc.datetime_from_str(time, "'%Y-%m-%d %H:%M:%S.%f'") - .unwrap(), + DateTime::parse_from_str(time, "'%Y-%m-%d %H:%M:%S.%f%#z'") + .unwrap() + .to_utc(), ), time, ) @@ -83,9 +88,9 @@ async fn 
test_with_special_date_time_params() { test_type( "TIMESTAMP WITH TIME ZONE", &[ - make_check("'1970-01-01 00:00:00.010000000'"), - make_check("'1965-09-25 11:19:33.100314000'"), - make_check("'2010-02-09 23:11:45.120200000'"), + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), (Timestamp::PosInfinity, "'infinity'"), (Timestamp::NegInfinity, "'-infinity'"), ], @@ -153,3 +158,33 @@ async fn test_time_params() { ) .await; } + +#[tokio::test] +async fn test_special_params_without_wrapper() { + async fn assert_overflows(client: &mut Client, val: &str, sql_type: &str) + where + T: FromSqlOwned + fmt::Debug, + { + let err = client + .query_one(&*format!("SELECT {}::{}", val, sql_type), &[]) + .await + .unwrap() + .try_get::<_, T>(0) + .unwrap_err(); + assert_eq!( + err.to_string(), + "error deserializing column 0: value too large to decode" + ); + } + + let mut client = connect("user=postgres").await; + + assert_overflows::>(&mut client, "'-infinity'", "timestamptz").await; + assert_overflows::>(&mut client, "'infinity'", "timestamptz").await; + + assert_overflows::(&mut client, "'-infinity'", "timestamp").await; + assert_overflows::(&mut client, "'infinity'", "timestamp").await; + + assert_overflows::(&mut client, "'-infinity'", "date").await; + assert_overflows::(&mut client, "'infinity'", "date").await; +} diff --git a/tokio-postgres/tests/test/types/eui48_04.rs b/tokio-postgres/tests/test/types/eui48_1.rs similarity index 92% rename from tokio-postgres/tests/test/types/eui48_04.rs rename to tokio-postgres/tests/test/types/eui48_1.rs index 074faa37e..0c22e9e87 100644 --- a/tokio-postgres/tests/test/types/eui48_04.rs +++ b/tokio-postgres/tests/test/types/eui48_1.rs @@ -1,4 +1,4 @@ -use eui48_04::MacAddress; +use eui48_1::MacAddress; use crate::types::test_type; diff --git a/tokio-postgres/tests/test/types/geo_010.rs b/tokio-postgres/tests/test/types/geo_types_06.rs 
similarity index 65% rename from tokio-postgres/tests/test/types/geo_010.rs rename to tokio-postgres/tests/test/types/geo_types_06.rs index 6e3d835b9..7195abc06 100644 --- a/tokio-postgres/tests/test/types/geo_010.rs +++ b/tokio-postgres/tests/test/types/geo_types_06.rs @@ -1,4 +1,4 @@ -use geo_010::{Coordinate, LineString, Point, Rect}; +use geo_types_06::{Coordinate, LineString, Point, Rect}; use crate::types::test_type; @@ -8,7 +8,7 @@ async fn test_point_params() { "POINT", &[ (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), - (Some(Point::new(-3.14, 1.618)), "POINT(-3.14, 1.618)"), + (Some(Point::new(-3.2, 1.618)), "POINT(-3.2, 1.618)"), (None, "NULL"), ], ) @@ -21,14 +21,14 @@ async fn test_box_params() { "BOX", &[ ( - Some(Rect { - min: Coordinate { x: -3.14, y: 1.618 }, - max: Coordinate { + Some(Rect::new( + Coordinate { x: -3.2, y: 1.618 }, + Coordinate { x: 160.0, y: 69701.5615, }, - }), - "BOX(POINT(160.0, 69701.5615), POINT(-3.14, 1.618))", + )), + "BOX(POINT(160.0, 69701.5615), POINT(-3.2, 1.618))", ), (None, "NULL"), ], @@ -40,7 +40,7 @@ async fn test_box_params() { async fn test_path_params() { let points = vec![ Coordinate { x: 0., y: 0. 
}, - Coordinate { x: -3.14, y: 1.618 }, + Coordinate { x: -3.2, y: 1.618 }, Coordinate { x: 160.0, y: 69701.5615, @@ -51,7 +51,7 @@ async fn test_path_params() { &[ ( Some(LineString(points)), - "path '((0, 0), (-3.14, 1.618), (160.0, 69701.5615))'", + "path '((0, 0), (-3.2, 1.618), (160.0, 69701.5615))'", ), (None, "NULL"), ], diff --git a/tokio-postgres/tests/test/types/geo_types_07.rs b/tokio-postgres/tests/test/types/geo_types_07.rs new file mode 100644 index 000000000..43a13f451 --- /dev/null +++ b/tokio-postgres/tests/test/types/geo_types_07.rs @@ -0,0 +1,61 @@ +#[cfg(feature = "with-geo-types-0_7")] +use geo_types_07::{Coord, LineString, Point, Rect}; + +use crate::types::test_type; + +#[tokio::test] +async fn test_point_params() { + test_type( + "POINT", + &[ + (Some(Point::new(0.0, 0.0)), "POINT(0, 0)"), + (Some(Point::new(-3.2, 1.618)), "POINT(-3.2, 1.618)"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_box_params() { + test_type( + "BOX", + &[ + ( + Some(Rect::new( + Coord { x: -3.2, y: 1.618 }, + Coord { + x: 160.0, + y: 69701.5615, + }, + )), + "BOX(POINT(160.0, 69701.5615), POINT(-3.2, 1.618))", + ), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_path_params() { + let points = vec![ + Coord { x: 0., y: 0. 
}, + Coord { x: -3.2, y: 1.618 }, + Coord { + x: 160.0, + y: 69701.5615, + }, + ]; + test_type( + "PATH", + &[ + ( + Some(LineString(points)), + "path '((0, 0), (-3.2, 1.618), (160.0, 69701.5615))'", + ), + (None, "NULL"), + ], + ) + .await; +} diff --git a/tokio-postgres/tests/test/types/jiff_01.rs b/tokio-postgres/tests/test/types/jiff_01.rs new file mode 100644 index 000000000..7c9052676 --- /dev/null +++ b/tokio-postgres/tests/test/types/jiff_01.rs @@ -0,0 +1,175 @@ +use jiff_01::{ + civil::{Date as JiffDate, DateTime, Time}, + Timestamp as JiffTimestamp, +}; +use std::fmt; +use tokio_postgres::{ + types::{Date, FromSqlOwned, Timestamp}, + Client, +}; + +use crate::connect; +use crate::types::test_type; + +#[tokio::test] +async fn test_datetime_params() { + fn make_check(s: &str) -> (Option, &str) { + (Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_datetime_params() { + fn make_check(s: &str) -> (Timestamp, &str) { + (Timestamp::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP", + &[ + make_check("'1970-01-01 00:00:00.010000000'"), + make_check("'1965-09-25 11:19:33.100314000'"), + make_check("'2010-02-09 23:11:45.120200000'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_timestamp_params() { + fn make_check(s: &str) -> (Option, &str) { + (Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn 
test_with_special_timestamp_params() { + fn make_check(s: &str) -> (Timestamp, &str) { + (Timestamp::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "TIMESTAMP WITH TIME ZONE", + &[ + make_check("'1970-01-01 00:00:00.010000000Z'"), + make_check("'1965-09-25 11:19:33.100314000Z'"), + make_check("'2010-02-09 23:11:45.120200000Z'"), + (Timestamp::PosInfinity, "'infinity'"), + (Timestamp::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_date_params() { + fn make_check(s: &str) -> (Option, &str) { + (Some(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (None, "NULL"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_with_special_date_params() { + fn make_check(s: &str) -> (Date, &str) { + (Date::Value(s.trim_matches('\'').parse().unwrap()), s) + } + test_type( + "DATE", + &[ + make_check("'1970-01-01'"), + make_check("'1965-09-25'"), + make_check("'2010-02-09'"), + (Date::PosInfinity, "'infinity'"), + (Date::NegInfinity, "'-infinity'"), + ], + ) + .await; +} + +#[tokio::test] +async fn test_time_params() { + fn make_check(s: &str) -> (Option